Your SlideShare is downloading. ×
  • Like
Operation Oriented Web Applications / Yokohama pm7
Upcoming SlideShare
Loading in...5
×

Thanks for flagging this SlideShare!

Oops! An error has occurred.

×

Now you can save presentations on your phone or tablet

Available for both iPhone and Android

Text the download link to your phone

Standard text messaging rates apply

Operation Oriented Web Applications / Yokohama pm7

  • 3,141 views
Published

 

Published in Technology , Business
  • Full Name Full Name Comment goes here.
    Are you sure you want to
    Your message goes here
    Be the first to comment
No Downloads

Views

Total Views
3,141
On SlideShare
0
From Embeds
0
Number of Embeds
1

Actions

Shares
Downloads
10
Comments
0
Likes
2

Embeds 0

No embeds

Report content

Flagged as inappropriate Flag as inappropriate
Flag as inappropriate

Select your reason for flagging this presentation as inappropriate.

Cancel
    No notes for slide
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n
  • \n

Transcript

  • 1. Operation Oriented Web Applications Yokohama.pm#7 @kazeburo
  • 2. Log::Minimal
  • 3. #!/usr/bin/env perluse strict;use warnings;use Log::Minimal;critf("%s","foo");warnf("%d %s", 1, "foo");sub hoge { infoff("foo"); debugff("bar");};hoge();local $Log::Minimal::AUTODUMP = 1;infof({ key => val });warnf("data is %s", { key => val });
  • 4. $ LM_DEBUG=1 perl /tmp/logminimal.pl2011-05-11T15:57:49 [CRITICAL] foo at /tmp/logminimal.pl line 72011-05-11T15:57:49 [WARN] 1 foo at /tmp/logminimal.pl line 82011-05-11T15:57:49 [INFO] foo at /tmp/logminimal.pl line 11 ,/tmp/logminimal.pl line 142011-05-11T15:57:49 [DEBUG] bar at /tmp/logminimal.pl line 12 ,/tmp/logminimal.pl line 142011-05-11T15:57:49 [INFO] {key => val} at /tmp/logminimal.pl line 172011-05-11T15:57:49 [WARN] data is {key => val} at /tmp/logminimal.plline 18
  • 5. local $Log::Minimal::PRINT = sub { my ( $time, $type, $message, $trace) = @_; print STDERR "[$type] $message $trace";};local $Log::Minimal::LOG_LEVEL = "WARN";infof("foo"); #print nothingwarnf("xaicron++");
  • 6. local $Log::Minimal::AUTODUMP = 1;warnf("response => %s",[ 200, ['Content-Type','text/plain'],['OK']]);# 2011-05-11T15:56:14 [WARN] response => [200,[Content-Type,text/plain],[OK]] at ..sub myerror { local $Log::Minimal::TRACE_LEVEL = 1; infof(@_);}myerror("foo");
  • 7. use Log::Minimal;use Plack::Builder;builder {    enable "Log::Minimal", autodump => 1;    sub {        my $env = shift;        warnf("warn message");        debugf("debug message");        ...    }};$ plackup -a demo.psgiHTTP::Server::PSGI: Accepting connections at http://0:5000/2011-05-11T16:32:24 [WARN] [/foo/bar/baz] warn message at /tmp/demo.psgi line 82011-05-11T16:32:24 [DEBUG] [/foo/bar/baz] debug message at /tmp/demo.psgi line 9
  • 8. DBIx::Sunny
  • 9. selectrow_arrayref($query, {}, @bind);selectrow_hashref($query, {}, @bind);selectall_arrayref($query, { Slice => {} }, @bind);prepare($query) && execute(@bind)
  • 10. use DBIx::Sunny;my $dbh = DBIx::Sunny->connect(...);use DBI;use DBIx::Sunny;my $dbh = DBI->connect(...,{ RootClass => 'DBIx::Sunny'});
  • 11. DBIx::Sunny::Schema
  • 12. package NoNoPaste::Data;use parent qw/DBIx::Sunny::Schema/;__PACKAGE__->query( 'add_entry', id => 'Str', nick => { isa => 'Str', default => 'anonymouse' }, body => 'Str', q{INSERT INTO entries ( id, nick, body, ctime ) values ( ?, ?, ?, NOW() )},);__PACKAGE__->select_row( 'entry', id => 'Uint', q{SELECT id,nick,body,ctime FROM entries WHERE id =?};);__PACKAGE__->select_all( 'entries_multi', id => { isa => 'ArrayRef[Uint]' }, q{SELECT id,nick,body,ctime FROM entries WHERE id IN (?)});
  • 13. use parent qw/DBIx::Sunny::Schema/;__PACKAGE__-> ( , => / , [ => / ,[..]], ' ',);
  • 14. prepare && execute && fetchrow_arrayref->[0];prepare && execute && fetchrow_hashref;prepare && execute && push @result, $_ while fetchrow_hash;prepare && execute
  • 15. my $dbh = DBI->connect(...);my $master = NoNoPaste::Data->new( dbh => $dbh );# readonly querymy $slave = NoNoPaste::Data->new( dbh => $dbh, readonly => 1 );#my $row = $master->add_entry( id => $id, nick => $nick, body => $body,);#my $rows = $slave->entry_list( offset => $offset );
  • 16. GreenBuckets
  • 17. use Digest::MurmurHash qw/murmur_hash/;for ( 1..100 ) { say murmur_hash(sprintf "test%03d", $_ );}
  • 18. use Digest::MurmurHash qw/murmur_hash/;for ( 1..100 ) { say murmur_hash(sprintf "test%03d", $_ );}
  • 19. CREATE TABLE objects ( id INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT, fid INT UNSIGNED NOT NULL, bucket_id INT UNSIGNED NOT NULL, rid SMALLINT UNSIGNED NOT NULL, gid SMALLINT UNSIGNED NOT NULL, filename VARCHAR(1024), INDEX (fid, bucket_id), INDEX (bucket_id)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_binSELECT * FROM objects WHERE fid = murmur($filename) ANDbucket_id = $bucket AND filename = $filename
  • 20. CREATE TABLE entries_int ( id INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT, fid INT UNSIGNED NOT NULL, bid INT UNSIGNED NOT NULL, filename VARCHAR(255) NOT NULL, INDEX (fid, bid), INDEX (bid)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_binCREATE TABLE entries_char ( id INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT, filename VARCHAR(255) NOT NULL, bid INT UNSIGNED NOT NULL, UNIQUE INDEX (filename, bid), INDEX (bid)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin
  • 21. .--------------------------------------------------.| table_name | total_kb | data_kb | index_kb |+--------------+-----------+-----------+-----------+| entries_char | 3360.0000 | 1552.0000 | 1808.0000 || entries_int | 2080.0000 | 1552.0000 | 528.0000 |--------------+-----------+-----------+----------- (filename 32 )
  • 22. @nodes = sort { murmur_hash(join "/", $a->{node_id},$bucket,$filename) <=> murmur_hash(join "/", $b->{node_id},$bucket,$filename)} @nodes;