#!/usr/bin/perl

eval 'exec /usr/bin/perl  -S $0 ${1+"$@"}'
    if 0; # not running under some shell

# This program efficiently checksums MySQL tables on one or more servers.
#
# This program is copyright (c) 2007 Baron Schwartz.
# Feedback and improvements are welcome.
#
# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2; OR the Perl Artistic License.  On UNIX and similar
# systems, you can issue `man perlgpl' or `man perlartistic' to read these
# licenses.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA  02111-1307  USA.

use strict;
use warnings FATAL => 'all';

use DBI;
use English qw(-no_match_vars);
use Getopt::Long;
use List::Util qw(min max);
use POSIX qw(ceil);
use Term::ReadKey;

# Version information, reported by --version.  $SVN_REV is parsed out of the
# SVN keyword-expanded Revision string at load time.
our $VERSION = '1.1.14';
our $DISTRIB = '848';
our $SVN_REV = sprintf("%d", q$Revision: 839 $ =~ m/(\d+)/g);

# Flush STDOUT after every write ($| = 1) so progress is visible when piped.
$OUTPUT_AUTOFLUSH = 1;

# ############################################################################
# Get configuration information.
# ############################################################################

# TODO: enable chunking on character columns.
# TODO: add --ignorecol
# TODO: add --ignoretype
# TODO: retry on deadlocks
# TODO: make replcheck just check each server specified.

# Define cmdline args; each is GetOpt::Long spec, whether required,
# human-readable description.  Add more hash entries as needed.
# Keys: s = Getopt::Long spec string; d = description printed by --help.
my @opt_spec = (
   { s => 'algorithm|a=s',     d => 'Checksum algorithm (ACCUM|CHECKSUM|BIT_XOR)' },
   { s => 'askpass',           d => 'Prompt for username and password for connections' },
   { s => 'chunkcol=s',        d => 'Explicitly specifies a column to use for chunking' },
   { s => 'chunksize|C=i',     d => 'Number of rows to checksum at a time (disallows -a CHECKSUM)' },
   { s => 'chunksize-exact',   d => 'Try not to exceed chunk sizes' },
   { s => 'databases|d=s',     d => 'Only do this comma-separated list of databases' },
   { s => 'defaults-file|F=s', d => 'Only read default options from the given file' },
   { s => 'emptyrepltbl',      d => 'Empty table given by --replicate before starting' },
   { s => 'engine|e=s',        d => 'Only do this comma-separated list of storage engines' },
   { s => 'explain',           d => 'Show, but do not execute, checksum queries' },
   { s => 'explainhosts',      d => 'Print connection information and exit' },
   { s => 'float-precision=i', d => 'Precision for FLOAT and DOUBLE column comparisons' },
   { s => 'function|f=s',      d => 'Cryptographic hash function (SHA1, MD5...)' },
   { s => 'help',              d => 'Show this help message' },
   { s => 'ignoredb|g=s',      d => 'Ignore this comma-separated list of databases' },
   { s => 'ignoretbl|n=s',     d => 'Ignore this comma-separated list of tables' },
   { s => 'index|i=s',         d => 'Index to use for ACCUM checksum algorithm' },
   { s => 'lock|k',            d => 'Lock table on master until done on slaves (implies -l)' },
   { s => 'count|r!',          d => 'Do the count (default)' },
   { s => 'crc|c!',            d => 'Do the CRC (default)' },
   { s => 'optxor|o!',         d => 'Optimize BIT_XOR with user variables (default)'},
   { s => 'password|p=s',      d => 'Password to use when connecting' },
   { s => 'port|P=i',          d => 'Port number to use for connection' },
   { s => 'quiet|q',           d => 'Do not print checksum results' },
   { s => 'recursecheck',      d => 'Do --replcheck recursively (implies --replcheck)' },
   { s => 'replcheck',         d => 'Connect to slaves and check --replicate results' },
   { s => 'replicate|R=s',     d => 'Replicate checksums in a table (disallows -a CHECKSUM)' },
   { s => 'slavelag|l',        d => 'Report how far slaves lag master' },
   { s => 'separator|s=s',     d => 'Separator for CONCAT_WS()' },
   { s => 'sleep=i',           d => 'Sleep time between checksums' },
   { s => 'sleep-coef=f',      d => 'Sleep time as a multiple of last checksum time' },
   { s => 'socket|S=s',        d => 'Socket file to use for connection' },
   { s => 'tab|b',             d => 'Output separated with tabs' },
   { s => 'tables|t=s',        d => 'Only do this comma-separated list of tables' },
   { s => 'user|u=s',          d => 'User for login if not current user' },
   { s => 'verify|v!',         d => 'Verify checksum compatibility across servers (default)' },
   { s => 'version',           d => 'Output version information and exit' },
   { s => 'wait|w=i',          d => 'How long to do MASTER_POS_WAIT() on slaves (implies -kl)' },
   { s => 'where|W=s',         d => 'Only do rows matching this WHERE clause (disallows -a CHECKSUM)' },
);

# This is the container for the command-line options' values to be stored in
# after processing.  Initial values are defaults.
# Keys are the short option letter when one exists, else the long name.
my %opts = (
   W => '',    # --where: no restriction by default (turned into a clause later)
   o => 1,     # --optxor on by default
   r => 0,     # --count off by default
   c => 1,     # --crc on by default
   s => '#',   # default CONCAT_WS() separator
   v => 1,     # --verify on by default
);

# Derive lookup metadata for every option spec: the long name (l), the
# one-letter short name if any (t), whether it is negatable with "no" (n),
# and the key (k) under which the value lives in %opts -- short name
# preferred.  Also reserve a slot in %opts and reject duplicate keys.
my %opt_seen;
for my $o ( @opt_spec ) {
   my ( $long_name, $short_name ) = $o->{s} =~ m/^([\w-]+)(?:\|([^!+=]*))?/;
   $o->{l} = $long_name;
   $o->{t} = $short_name;
   $o->{k} = $short_name || $long_name;
   $o->{n} = $o->{s} =~ m/!/;
   $opts{$o->{k}} = undef unless defined $opts{$o->{k}};
   die "Duplicate option $o->{k}" if $opt_seen{$o->{k}}++;
}

# Parse the command line.  no_ignore_case keeps -P distinct from -p;
# bundling allows combined single-letter options like -kl.
Getopt::Long::Configure('no_ignore_case', 'bundling');
GetOptions( map { $_->{s} => \$opts{$_->{k}} } @opt_spec) or $opts{help} = 1;

if ( $opts{version} ) {
   print "mysql-table-checksum  Ver $VERSION Distrib $DISTRIB Changeset $SVN_REV\n";
   exit(0);
}

# Post-process command-line options and arguments.
# --wait implies --lock, which implies --slavelag; --recursecheck implies
# --replcheck (see the option descriptions above).
$opts{k} ||= defined $opts{w};
$opts{l} ||= $opts{k};
$opts{replcheck} ||= $opts{recursecheck};

# Make comma-separated lists into hashes.
if ( $opts{d} ) {
   $opts{d} = { map { $_ => 1 } split(/,\s*/, $opts{d}) };
}
$opts{g} = { map { $_ => 1 } split(/,\s*/, $opts{g} || '') };
if ( $opts{t} ) {
   $opts{t} = { map { $_ => 1 } split(/,\s*/, $opts{t}) };
}
$opts{n} = { map { $_ => 1 } split(/,\s*/, $opts{n} || '') };
if ( $opts{e} ) {
   # Engine names are compared case-insensitively.
   $opts{e} = { map { lc($_) => 1 } split(/,\s*/, $opts{e}) };
}

# Don't let someone put a ' in the separator
$opts{s} =~ s/'//g;

# Create a WHERE clause
# Always produce a clause (default is the no-op 1=1) so later code can
# unconditionally append " AND ..." terms to it.
$opts{W} = $opts{W} ? " WHERE ($opts{W})" : ' WHERE (1=1)';

if ( $opts{a} && $opts{a} !~ m/^(?:ACCUM|CHECKSUM|BIT_XOR)$/ ) {
   warn "--algorithm=$opts{a}: unknown algorithm.\n";
   $opts{help} = 1;
}

if ( !@ARGV ) {
   warn "No hosts specified.\n";
   $opts{help} = 1;
}

# Map single-letter DSN part names to their human-readable/DBI attribute names.
my %conn   = (
   h => 'host',
   P => 'port',
   S => 'mysql_socket',
   u => 'user',
   p => 'pass',
   F => 'mysql_read_default_file',
   D => 'database',
);

# Parse each host DSN; later DSNs inherit unspecified parts from the first
# (the master).  unique() and parse_dsn() are defined elsewhere in this file.
my @hosts;
foreach my $arg ( unique(@ARGV) ) {
   push @hosts, parse_dsn($arg, $hosts[0]);
}

if ( $opts{explain} ) {
   # The queries are the same on every host, so explaining one is enough.
   @hosts = $hosts[0];
}

# --explainhosts: dump the resolved connection info for each host and quit.
if ( $opts{explainhosts} ) {
   foreach my $host ( @hosts ) {
      print "Server $host->{h}:\n" . join("   \n",
         map { sprintf('%25s %s', $conn{$_}, (defined $host->{$_} ? $host->{$_} : '[null]') ) } sort keys %conn),
         "\n";
   }
   exit(0);
}

# Sanity-check option combinations that involve multiple hosts.
if ( !$opts{help} ) {
   if ( $opts{replcheck} && !$opts{R} ) {
      warn "--replcheck requires --replicate.\n";
      $opts{help} = 1;
   }
   elsif ( $opts{R} && @hosts > 1 ) {
      warn "You can only specify one host with --replicate.\n";
      $opts{help} = 1;
   }
}

# Print the usage message (options sorted by long name) and exit.
if ( $opts{help} ) {
   print "Usage: mysql-table-checksum [OPTION]... HOST [HOST...]\n\n";
   # Column width: the longest long name, plus 4 for a "[no]" prefix when
   # the option is negatable.
   my $maxw = max(map { length($_->{l}) + ($_->{n} ? 4 : 0)} @opt_spec);
   foreach my $spec ( sort { $a->{l} cmp $b->{l} } @opt_spec ) {
      my $long  = $spec->{n} ? "[no]$spec->{l}" : $spec->{l};
      my $short = $spec->{t} ? "-$spec->{t}" : '';
      printf("  --%-${maxw}s %-4s %s\n", $long, $short, $spec->{d});
   }
   print <<USAGE;

mysql-table-checksum efficiently checksums MySQL tables on one or more hosts.
If you specify multiple hosts, the first is assumed to be the master.
Connection options are read from MySQL option files.  For more details, please
read the documentation:

   perldoc mysql-table-checksum

USAGE
   exit(0);
}

# ############################################################################
# Ready to work now.
# ############################################################################

my $exit_status = 0;           # overall process exit code
my $EPOCH       = '1970-01-01';

# Column types that can be chunked/compared via integer arithmetic, and
# floating-point types that need special precision handling.
my %int_types = map { $_ => 1 }
   qw( bigint date datetime int mediumint smallint time timestamp tinyint year );
my %real_types = map { $_ => 1 }
   qw( decimal double float );

# TODO: Unsupported types:
# BIT SET ENUM
# BINARY BLOB CHAR CHAR BYTE LONGBLOB LONGTEXT MEDIUMBLOB
# MEDIUMTEXT TEXT TINYBLOB TINYTEXT VARBINARY VARCHAR
# TODO: ENUM and SET can be treated as integers without defeating indexes.

my $db_options = {
   RaiseError => 1,
   PrintError => 0,
   # --lock needs the connection to hold its locks, so disable autocommit.
   AutoCommit => $opts{k} ? 0 : 1,
};

my %db_info; # holds user/pass etc for each host

# Connection to the first host (the master).  get_dbh() is defined elsewhere
# in this file.
my $main_dbh  = get_dbh($hosts[0], ($opts{d} ? keys %{$opts{d}} : '' ));

# ############################################################################
# Check replication slaves if desired.
# ############################################################################
if ( $opts{replcheck} ) {
   my %seen;          # server IDs already visited, to avoid loops/duplicates
   my @hosts_to_do;   # work queue of slaves discovered via SHOW SLAVE HOSTS
   my $exit = 0;      # becomes 1 if any slave shows checksum differences

   # Start the checking.
   if ( version_ge($main_dbh, '3.23.26') ) {
      my ($id) = $main_dbh->selectrow_array('SELECT @@SERVER_ID');
      push @hosts_to_do,
         grep { $_->{Master_id} == $id } # Only its own slaves
         @{$main_dbh->selectall_arrayref("SHOW SLAVE HOSTS", { Slice => {} })};
      # Add the specified server (the master) as well.  This way the server
      # will get checked if someone specifies the root of some sub-tree in the
      # replication topology.
      push @hosts_to_do, {
         Server_id => $id,
         dbh       => $main_dbh,
         Host      => $hosts[0]->{h},
         Port      => $hosts[0]->{P},
      };
   }
   else {
      print_err("Cannot check slaves; --replcheck requires MySQL 3.23.26 or newer.");
   }

   # Rows in the --replicate table where the slave's count or CRC disagrees
   # with the master's.  The ISNULL terms make the CRC comparison NULL-safe.
   my $sql = "SELECT db, tbl, chunk, boundaries, this_cnt-master_cnt AS cnt_diff, "
      . "this_crc <> master_crc OR ISNULL(master_crc) <> ISNULL(this_crc) AS crc_diff "
      . "FROM $opts{R} "
      . "WHERE master_cnt <> this_cnt OR master_crc <> this_crc "
      . "OR ISNULL(master_crc) <> ISNULL(this_crc)";

   SLAVE:
   while ( @hosts_to_do ) {
      my $slave = shift @hosts_to_do;
      next SLAVE if $seen{$slave->{Server_id}};

      # Connect to that server (if needed)
      my $slave_dbh;
      if ( !$slave->{dbh} ) {
         my $dsn    = parse_dsn("h=$slave->{Host},P=$slave->{Port}", $hosts[0]);
         $slave_dbh = get_dbh($dsn, ($opts{d} ? keys %{$opts{d}} : '' ));
         $slave->{dbh} = $slave_dbh;
      }
      else {
         $slave_dbh = $slave->{dbh};
      }
      my $id;

      # SHOW SLAVE HOSTS is somewhat flaky.  Sometimes it shows obsolete
      # information.  Verify that this instance has the server ID reported by
      # its master.
      # NOTE(review): this checks the *master's* version but then queries the
      # slave; it looks like it should be version_ge($slave_dbh, ...).  As
      # written, the else branch is unreachable because the master already
      # passed this same check above.  Confirm before changing.
      if ( version_ge($main_dbh, '3.23.26') ) {
         ($id) = $slave_dbh->selectrow_array('SELECT @@SERVER_ID');
         $seen{$id}++;
         next SLAVE unless $id == $slave->{Server_id};
      }
      else {
         print_err("Cannot check $slave->{Host}:$slave->{Port}; requires MySQL 3.23.26 or newer.");
         next SLAVE;
      }

      # Recurse to its slaves, if desired
      if ( $opts{recursecheck} ) {
         eval {
            push @hosts_to_do,
               grep { $_->{Master_id} == $id } # Only its own slaves
               @{$slave_dbh->selectall_arrayref("SHOW SLAVE HOSTS", { Slice => {} })};
         };
         if ( $EVAL_ERROR ) {
            print_err("Cannot recurse from $slave->{Host}:$slave->{Port}");
            print_err($EVAL_ERROR);
         }
      }

      my $chunks = $slave_dbh->selectall_arrayref($sql, { Slice => {} });

      if ( !$opts{q} ) {
         # Rpl_recovery_rank is noise in the connection summary below.
         delete $slave->{Rpl_recovery_rank};
         print "\nResults on "
            . join(':', map  { "$slave->{$_}" }
                        grep { $slave->{$_} }
                             qw(Host Port)
               )
            . " ("
            . join(',', map  { "$_=$slave->{$_}" }
                        grep { $_ ne 'dbh' && $slave->{$_} }
                             keys %$slave
               )
            . ")\n";
      }

      if ( $chunks && @$chunks ) {
         $exit = 1;  # at least one difference found somewhere
         if ( !$opts{q} ) {
            # Design a header to align the results
            my $max_db   = max(5, map { length($_->{db})  } @$chunks);
            my $max_tbl  = max(5, map { length($_->{tbl}) } @$chunks);
            my $fmt      = "%-${max_db}s %-${max_tbl}s %5s %8s %8s %s\n";

            # Print the header and the rows
            printf($fmt, qw(db tbl chunk cnt_diff crc_diff boundaries));
            foreach my $chunk ( @$chunks ) {
               printf($fmt, @{$chunk}{qw(db tbl chunk cnt_diff crc_diff boundaries)});
            }
         }
      }
   }

   exit($exit);
}

# ############################################################################
# Otherwise do the checksums.
# ############################################################################
# The main connection must survive the children forked below, so prevent a
# child's DBI cleanup from closing the parent's connection.
$main_dbh->{InactiveDestroy} = 1; # Can't be set in $db_options
my @databases = @{$main_dbh->selectcol_arrayref('SHOW DATABASES')};

# Figure out what strategy to use.  By this point $opts{W} ALWAYS holds a
# WHERE clause (the default is the no-op ' WHERE (1=1)'), so testing its
# truth would always disqualify CHECKSUM TABLE.  Instead, detect whether the
# user actually supplied --where by comparing against that default.
my $user_where = $opts{W} ne ' WHERE (1=1)';
my $strat;
if ( (!$opts{a} || $opts{a} eq 'CHECKSUM')   # By default, use CHECKSUM TABLE
   && !$user_where && !$opts{C}              # CHECKSUM does whole table
   && !$opts{R}                              # CHECKSUM can't be INSERT.. SELECTed
   && version_ge($main_dbh, '4.1.1') )       # CHECKSUM doesn't exist before this
{
   if ( !$opts{a} && $opts{r} ) {
      # The user didn't specify an algorithm, but wants a count, which is
      # inefficient with CHECKSUM TABLE.  Since the user didn't explicitly
      # demand this poor strategy, we fall back to another.
      $strat = 'ACCUM';
   }
   else {
      # Alas, the user wants me to be stupid and run two queries where one would
      # suffice.  (This can actually be a good strategy on MyISAM, and is even
      # better when MyISAM's live checksums are enabled).
      $strat = 'CHECKSUM';
   }
}
elsif (!$opts{a} || $opts{a} eq 'ACCUM') {   # Fall back to ACCUM
   $strat = 'ACCUM';
}
elsif ( $opts{a} eq 'BIT_XOR'
   && version_ge($main_dbh, '4.1.1') )       # BIT_XOR() doesn't exist before this
{
   $strat = 'BIT_XOR';
}
else {
   $strat = 'ACCUM';                         # Fallback if all else fails
}
# Tell the user when their explicit choice couldn't be honored.
if ( $opts{a} && $opts{a} ne $strat ) {
   warn "--algorithm=$opts{a} can't be used; falling back to $strat\n";
}

# If using a cryptographic hash strategy, decide what hash function to use.
my $func = $opts{f} || 'SHA1';
if ( $strat =~ m/ACCUM|BIT_XOR/ ) {
   # Probe the server: not every build/version supports every hash function.
   # With RaiseError on, a failing probe throws, leaving $res undef.
   my $res = eval { $main_dbh->do("SELECT $func('test-string')") };
   if ( !$res ) {
      my $err = '';
      # Pull the server's reason out of the DBI error message, if present.
      if ( $EVAL_ERROR && $EVAL_ERROR =~ m/failed: (.*?) at \S+ line/ ) {
         $err = qq{ because "$1"};
      }
      if ( lc $func eq 'md5' ) {  # There's nothing to fall back to
         die "Cryptographic function $func cannot be used$err\n";
      }
      warn "Cryptographic function $func cannot be used$err; falling back to MD5()\n";
      $func = 'MD5';
   }
}

# Width of the chosen hash's output (at least 16), the slice index that must
# carry the real checksum expression, and the sanity query re-used later to
# verify user-variable behavior across hosts.
my $crc_wid   = max(16, length(($main_dbh->selectrow_array("SELECT $func('a')"))[0]));
my $crc_slice = 0;
my $sanity    = '';

# Figure out which slice in a sliced BIT_XOR checksum should have the actual
# concat-columns-and-checksum, and which should just get variable references.
if ( $opts{o} && $strat eq 'BIT_XOR' ) {
   my $desired = uc $main_dbh->selectall_arrayref("SELECT $func('a')")->[0]->[0];
   my $result  = '';
   my $start   = 1;

   do { # Try different positions till sliced result equals non-sliced.
      $main_dbh->do('SET @crc := NULL, @cnt := 0');
      my $slices = make_slices("\@crc := $func('a')");
      # Assign to the OUTER $sanity declared above: it is consulted again in
      # the cross-server verification step.  Declaring a new lexical here
      # (my $sanity = ...) would shadow it and leave it forever empty,
      # silently disabling that verification.
      $sanity = "SELECT CONCAT($slices) as test from (select null) as t";
      $result = $main_dbh->selectall_arrayref($sanity)->[0]->[0];
      if ( $result ne $desired ) {
         $start += 16;
         ++$crc_slice;
      }
   } while ( $start < $crc_wid && $result ne $desired );

   if ( $result ne $desired ) {
      # Disable the user-variable optimization.
      warn "Cannot get good checksums with --optxor, disabling.\n";
      $opts{o} = 0;
   }
}

if ( $opts{v} && @hosts > 1 ) {
   # Verify that CONCAT_WS is compatible across all servers.  On older versions
   # of MySQL it skips both empty strings and NULL; on newer just NULL.
   my @verify_sums;
   my @verify_vars;
   foreach my $host ( @hosts ) {
      my $dbh = get_dbh($host, ($opts{d} ? keys %{$opts{d}} : '' ));
      my $cks = $dbh->selectall_arrayref("SELECT MD5(CONCAT_WS(',', '1', ''))")->[0]->[0];
      push @verify_sums, { host => $host->{h}, ver => $dbh->{mysql_serverinfo}, sum => $cks };
      if ( $sanity ) {
         # Also verify all servers treat user-variables the same way.
         $dbh->do('SET @crc := NULL, @cnt := 0');
         $cks = $dbh->selectall_arrayref($sanity)->[0]->[0];
         push @verify_vars, { host => $host->{h}, ver => $dbh->{mysql_serverinfo}, sum => $cks };
      }
   }
   # unique() in numeric context yields the count of distinct checksums.
   if ( unique(map { $_->{sum} } @verify_sums ) > 1 ) {
      my $max = max(map { length($_->{h}) } @hosts);
      die "Not all servers have compatible versions.  Some return different\n"
         . "checksum values for the same query, and cannot be compared.  This\n"
         . "behavior changed in MySQL 4.0.14.  Here is info on each host:\n\n"
         . join("\n",
            map { sprintf("%-${max}s %-32s %s", @{$_}{qw(host sum ver)}) }
            { host => 'HOST', sum => 'CHECKSUM', ver => 'VERSION'}, @verify_sums)
         . "\n\nYou can disable this check with --noverify.\n";
   }

   # Also check @verify_vars (not @verify_sums... I almost deleted this as
   # crap code till I noticed that it's not the same variable name).
   if ( unique(map { $_->{sum} } @verify_vars ) > 1 ) {
      # No need to die, just disable the variable optimization.
      $opts{o} = 0;
   }
}

# Statement handles used per-chunk by do_tbl_replicate().
my ($fetch_sth, $update_sth);
if ( $opts{R} ) {
   # Check for the existence of, and optionally clean out, the replication table before starting.
   $main_dbh->do("DESCRIBE $opts{R}"); # Ignore the result; this is a no-op
   if ( $opts{emptyrepltbl} ) {
      # Restrict the purge to the selected databases/tables, if any were given.
      my $del_sql = "DELETE FROM $opts{R}";
      if ( $opts{d} ) {
         $del_sql .= " WHERE ";
         $del_sql .= "db IN(" . join(',', map { $main_dbh->quote($_) } keys %{$opts{d}}) . ')';
      }
      if ( $opts{t} ) {
         $del_sql .= $opts{d} ? ' AND ' : ' WHERE ';
         $del_sql .= "tbl IN(" . join(',', map { $main_dbh->quote($_) } keys %{$opts{t}}) . ')';
      }
      $main_dbh->do($del_sql);
   }
   # Prepared once up front, executed once per chunk.
   $fetch_sth = $main_dbh->prepare(
      "SELECT this_crc, this_cnt FROM $opts{R} WHERE db = ? AND tbl = ? AND chunk = ?");
   $update_sth = $main_dbh->prepare(
      "UPDATE $opts{R} SET master_crc = ?, master_cnt = ? WHERE db = ? AND tbl = ? AND chunk = ?");
}

DATABASE:
foreach my $database ( @databases ) {
   # Apply --databases / --ignoredb filters and skip system/junk schemas.
   next DATABASE if
      ( $opts{d} && !exists($opts{d}->{$database}) )
      || $database =~ m/^(information_schema|lost\+found)$/mi
      || exists $opts{g}->{$database};

   my @tables = @{$main_dbh->selectcol_arrayref("SHOW TABLES FROM `$database`")};
   next DATABASE unless @tables;

   my %info_for;
   TABLE:
   foreach my $table ( @tables ) {
      # Apply --tables / --ignoretbl filters; never checksum the --replicate
      # table itself.
      next TABLE if
         ( $opts{t} && !exists($opts{t}->{$table}) )
         || ( $opts{R} && $opts{R} eq "$database.$table" )
         || exists $opts{n}->{$table};

      # Get the table type, and a query to generate a checksum for it.
      eval {
         my $info = checksum_query( $main_dbh, $database, $table );

         # Skip views, and tables of the wrong engine.
         if ( $info->{engine} ne 'VIEW' && (!$opts{e} || $opts{e}->{lc($info->{engine})}) ) {
            $info_for{$table} = $info;
         }

      };
      if ( $EVAL_ERROR ) {
         print_err($EVAL_ERROR, $database, $table);
      }

   }

   next DATABASE unless %info_for;

   # Design and print header
   my ($hdr, $explain);
   if ( $opts{b} ) {
      $hdr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n";
      $explain = "%s\t%s\t%s\n";
   }
   else {
      # Size each column to the widest value it will have to hold.
      my $max_tbl  = max(5, map { length($_) } keys %info_for);
      my $max_db   = max(8, length($database));
      my $max_host = max(4, map { length($_->{h}) } @hosts);
      $hdr         = "%-${max_db}s %-${max_tbl}s %5s %-${max_host}s %-6s %10s %${crc_wid}s %4s %4s %4s %4s\n";
      $explain     = "%-${max_db}s %-${max_tbl}s %s\n";
   }
   my @hdr_args = qw(DATABASE TABLE CHUNK HOST ENGINE COUNT CHECKSUM TIME WAIT STAT LAG);
   printf($hdr, @hdr_args) unless ($opts{q} || $opts{explain});

   TABLE:
   foreach my $table ( sort keys %info_for ) {
      eval {
         my $info      = $info_for{$table};

         # Chunking.  Gah.  Complicated bad stuff.  Put it off in a subroutine,
         # it will make it all better.  Chunks are either arrayref of start/end,
         # undef for the NULL chunk, or the string '' for a table that's not
         # chunked.
         if ( $info->{chunk_col} && $opts{C} ) {
            calculate_chunks($info);
         }
         else {
            $info->{chunks}    = [ '' ];
            $info->{chunk_tot} = 1;
         }

         # Do a chunk at a time.
         foreach my $chunk_num ( 0 .. scalar(@{$info->{chunks}}) - 1 ) {
            $info->{chunk_cur} = $chunk_num + 1;
            my $chunk_start_time = time;
            if ( $opts{R} ) { # We're in --replicate mode.
               do_tbl_replicate($hosts[0], $info, $hdr, $explain);
            }

            else {

               # Lock table and get master position on the master, if applicable.
               $main_dbh->do("LOCK TABLES `$database`.`$table` READ")
                  if ($opts{k} && !$opts{explain});
               if ( defined $opts{w} && !$opts{explain} ) {
                  # Capture File/Position so slaves can MASTER_POS_WAIT().
                  my $master_status = $main_dbh->selectrow_hashref('SHOW MASTER STATUS');
                  @{$info}{keys %$master_status} = values %$master_status;
               }

               # Checksum every host concurrently: fork one child per host.
               # When there is a single host, run in-process without forking.
               my %children;
               foreach my $i ( 0 .. $#hosts ) {
                  my $host = $hosts[$i];
                  my $pid = @hosts > 1 ? fork() : undef;
                  if ( @hosts == 1 || (defined($pid) && $pid == 0) ) { # I am a child
                     eval {
                        do_tbl($host, $info, $i == 0, $hdr, $explain);
                     };
                     if ( $EVAL_ERROR ) {
                        print_err($EVAL_ERROR, $database, $table);
                        exit(1) if @hosts > 1; # exit only if I'm a child
                     }
                     exit(0) if @hosts > 1; # exit only if I'm a child
                  }
                  elsif ( @hosts > 1 && !defined($pid) ) {
                     die("Unable to fork!");
                  }
                  # I already exited if I'm a child, so I'm the parent.
                  $children{$host->{h}} = $pid if @hosts > 1;
               }

               # Wait for the children to exit.
               foreach my $host ( keys %children ) {
                  my $pid = waitpid($children{$host}, 0);
                  $exit_status = $CHILD_ERROR;
               }
               $main_dbh->do("UNLOCK TABLES") if ($opts{k} && !$opts{explain});

            }

            # Throttle between chunks if requested.
            if ( $opts{sleep} && !$opts{explain} ) {
               sleep($opts{sleep});
            }
            elsif ( $opts{'sleep-coef'} && !$opts{explain} ) {
               my $sleep_time = ceil( (time - $chunk_start_time) * $opts{'sleep-coef'} );
               sleep($sleep_time);
            }

         }
      };
      if ( $EVAL_ERROR ) {
         print_err($EVAL_ERROR, $database, $table);
      }
   }
}

# Side effects: modifies $info.  I moved it here just to clear up the logic in
# the main loop a bit.
# Computes chunk boundaries for $info's table using its chunk column, setting
# $info->{chunks} (arrayref of [start,end] pairs, possibly plus undef for the
# NULL chunk or the string '' meaning "whole table") and $info->{chunk_tot}.
sub calculate_chunks {
   my ( $info ) = @_;

   my $table_min;
   my $table_max;
   my $num_rows;
   my $chunk_size;
   my @chunks;

   # Determine the range of values for the chunk_col column on this table.
   my $chunk_sql = "SELECT MIN($info->{chunk_col}), MAX($info->{chunk_col}) "
      . "FROM `$info->{database}`.`$info->{table}`$opts{W}";
   if ( $info->{chunk_null} && !version_ge($main_dbh, '4.0.0') ) {
      # MySQL 3.23 will return NULL as the minimum column value and break my
      # test suite when there is a row with NULL in the chunk column.
      # NOTE(review): there is no space before "AND", producing "...(1=1)AND
      # col...".  MySQL parses it because ')' ends the token, but it reads
      # oddly; confirm and consider adding a leading space.
      $chunk_sql .= "AND $info->{chunk_col} IS NOT NULL";
   }

   ( $table_min, $table_max ) = $main_dbh->selectrow_array($chunk_sql);

   # Estimate the row count from EXPLAIN instead of COUNT(*), which could be
   # very slow on a large table.
   my $expl = $main_dbh->selectrow_hashref(
      "EXPLAIN SELECT * FROM `$info->{database}`.`$info->{table}"
      . "`$opts{W} AND $info->{chunk_col} IS NOT NULL");

   # This isn't always reliable.  Sometimes EXPLAIN will say there are rows
   # when the table is empty.
   $num_rows = $expl->{rows};

   if ( $num_rows && defined $table_min && defined $table_max ) { # $num_rows is unreliable

      # Determine chunk size in "distance between endpoints" that will give
      # approximately the right number of rows between the endpoints.  Also
      # find the start/end points as a number that Perl can do + and < on.
      my ($range_func, $start_point, $end_point);
      if ( $info->{chunk_type} =~ m/(?:int|year|float|double|decimal)$/ ) {
         $start_point = $table_min;
         $end_point   = $table_max;
         $range_func  = \&range_num;
      }
      elsif ( $info->{chunk_type} eq 'timestamp' ) {
         ($start_point, $end_point) = $main_dbh->selectrow_array(
            "SELECT UNIX_TIMESTAMP('$table_min'), UNIX_TIMESTAMP('$table_max')");
         $range_func  = \&range_timestamp;
      }
      elsif ( $info->{chunk_type} eq 'date' ) {
         ($start_point, $end_point) = $main_dbh->selectrow_array(
            "SELECT TO_DAYS('$table_min'), TO_DAYS('$table_max')");
         $range_func  = \&range_date;
      }
      elsif ( $info->{chunk_type} eq 'time' ) {
         ($start_point, $end_point) = $main_dbh->selectrow_array(
            "SELECT TIME_TO_SEC('$table_min'), TIME_TO_SEC('$table_max')");
         $range_func  = \&range_time;
      }
      elsif ( $info->{chunk_type} eq 'datetime' ) {
         # Newer versions of MySQL could use TIMESTAMPDIFF, but it's easier
         # to maintain just one kind of code, so I do it all with DATE_ADD().
         $start_point = timestampdiff($main_dbh, $table_min);
         $end_point   = timestampdiff($main_dbh, $table_max);
         $range_func  = \&range_datetime;
      }
      else {
         die "I don't know how to chunk $info->{chunk_type}\n";
      }

      # The endpoints could easily be undef, because of things like dates that
      # are '0000-00-00'.  The only thing to do is make them zeroes and
      # they'll be done in a single chunk then.
      if ( !defined $start_point ) {
         $start_point = 0;
      }
      if ( !defined $end_point || $end_point < $start_point ) {
         $end_point = 0;
      }

      # Calculate the chunk size, in terms of "distance between endpoints."  If
      # possible and requested, forbid chunks from being any bigger than
      # specified.  Add 1 to the range because the interval is half-open,
      # that is, it is inclusive on the upper end.  (If the table's min value is
      # 1 and the max is 100, that's 100 values to cover, not 99).
      $chunk_size = ceil( $opts{C} * (($end_point - $start_point) + 1) / $num_rows ) || $opts{C};
      if ( $opts{'chunksize-exact'} && $info->{chunk_exact} ) {
         $chunk_size = $opts{C};
      }

      # Generate a list of chunk boundaries.
      for ( my $i = $start_point; $i < $end_point; $i += $chunk_size ) {
         push @chunks, [ $range_func->($main_dbh, $i, $chunk_size, $end_point) ];
      }

      if ( $start_point < $end_point ) {
         # A final chunk that matches the end of the range, and anything outside it
         # on the upper end, which should not happen on the master but may on a
         # slave that has extra rows.
         push @chunks, [ $chunks[-1]->[1], undef ];

         # Ditto for rows below the lower boundary, but this one should not match
         # anything at all on the master, unlike the one above.
         push @chunks, [ undef, $chunks[0]->[0] ];
      }
      else {
         # There are no chunks; just do the whole table in one chunk.
         push @chunks, '';
      }

   }
   else {
      push @chunks, '';
   }

   # If the chunk column is nullable, we need to do NULL separately.
   if ( $info->{chunk_null} ) {
      push @chunks, undef;
   }

   $info->{chunks}    = \@chunks;
   $info->{chunk_tot} = scalar(@chunks);
}

# Embeds a human-readable progress marker into a query: the first literal
# occurrence of "progress_comment" is replaced with "db.tbl:current/total"
# built from the table info hashref.  Returns the modified query.
sub add_progress_comment {
   my ( $sql, $tbl_info ) = @_;
   my $marker = sprintf('%s.%s:%s/%s',
      @{$tbl_info}{qw(database table chunk_cur chunk_tot)});
   $sql =~ s/progress_comment/$marker/;
   return $sql;
}

# Checksums one chunk of one table in --replicate mode: runs the checksum
# statement on the master (where it replicates to the slaves via the binlog),
# then copies the master's own results into the master_crc/master_cnt columns
# of the --replicate table so slaves can be compared against them later.
sub do_tbl_replicate {
   my ( $host, $info, $hdr, $explain ) = @_;
   my $dbh = $main_dbh;
   my $cnt = 'NULL';
   my $crc = 'NULL';
   my $beg = time();
   my $sql = add_progress_comment($info->{query}, $info);

   my $chunk = $info->{chunks}->[$info->{chunk_cur} - 1];
   my $where = make_where_clause($main_dbh, $info, $chunk);

   # Figure out arguments to the query
   my @arg = ($info->{chunk_cur}, $where);

   if ( $opts{explain} ) {
      # Show the full query once per table, then each chunk's WHERE clause.
      if ( $info->{chunk_cur} == 1 ) {
         printf($explain, @{$info}{qw(database table)}, $sql);
      }
      printf($explain, @{$info}{qw(database table)}, $where);
      return;
   }

   $dbh->do("USE `$info->{database}`");
   # Reset the user variables; on 5.1.8+ also force statement-based binlogging
   # so the checksum statement itself replicates to the slaves.
   $dbh->do('SET @crc := NULL, @cnt := 0 /*!50108 , @@binlog_format := "STATEMENT"*/');
   $dbh->do("$sql AND $where", {}, @arg);

   # Read back what the master just computed for itself and record it as the
   # reference value the slaves will be compared against.
   $fetch_sth->execute(@{$info}{qw(database table chunk_cur)});
   ( $crc, $cnt ) = $fetch_sth->fetchrow_array();
   $update_sth->execute($crc, $cnt, @{$info}{qw(database table chunk_cur)});
   my $end = time();
   $crc  ||= 'NULL';
   printf($hdr, @{$info}{qw(database table chunk_cur)}, $host->{h}, $info->{engine}, $cnt, $crc,
      $end - $beg, 'NULL', 'NULL', 'NULL') unless ($opts{q} || $opts{explain});
}

# Checksums the current chunk of one table on a single host.  $is_master says
# whether this is the first (master) host; slaves may first wait for the
# master's binlog position (--wait) and report replication lag (--slavelag).
sub do_tbl {
   my ( $host, $info, $is_master, $hdr, $explain ) = @_;
   my $dbh = get_dbh($host, $info->{database});
   my $cnt = 'NULL';
   my $crc = 'NULL';
   my $sta = 'NULL';   # MASTER_POS_WAIT() result, for the STAT column
   my $lag = 'NULL';   # Seconds_Behind_Master, for the LAG column
   my $beg = time();
   if ( !$is_master && defined $opts{w} && !$opts{explain} ) {
      # Wait (up to $opts{w} seconds) until this slave reaches the master's
      # binlog coordinates captured in $info by the main loop.
      $sta = $dbh->selectall_arrayref(
         add_progress_comment(
            "SELECT /*progress_comment*/ MASTER_POS_WAIT('$info->{File}', $info->{Position}, $opts{w})",
            $info
         ))->[0]->[0];
      $sta = 'NULL' unless defined $sta;
   }
   if ( !$is_master && $opts{l} && !$opts{explain} ) {
      my $res = $dbh->selectrow_hashref("SHOW SLAVE STATUS");
      $lag = $res && defined $res->{Seconds_Behind_Master}
           ? $res->{Seconds_Behind_Master}
           : 'NULL';
   }
   my $mid = time();
   if ( $strat eq 'CHECKSUM' ) {
      # CHECKSUM TABLE can't also count rows, so the count is a second query.
      if ( $opts{c} ) {
         $crc = do_checksum($dbh, $info, $explain);
      }
      if ( $opts{r} ) {
         $cnt = do_count($dbh, $info, $explain);
      }
   }
   elsif ( $opts{c} ) {
      # ACCUM/BIT_XOR return the count and checksum from a single query.
      ( $cnt, $crc ) = do_var_crc($dbh, $info, $explain);
      $crc ||= 'NULL';
   }
   else {
      $cnt = do_count($dbh, $info, $explain);
   }
   my $end = time();
   $dbh->disconnect();
   printf($hdr, @{$info}{qw(database table chunk_cur)}, $host->{h}, $info->{engine}, $cnt, $crc,
      $end - $mid, $mid - $beg, $sta, $lag) unless ($opts{q} || $opts{explain});
}

# Generates a checksum query for a given table.
# Inspects a table's DDL and builds the checksum query for it, plus the
# metadata needed for chunking.  Returns a hashref with database, table,
# engine, query, and chunk_* keys; returns {engine => 'VIEW'} for views,
# which are ignored.
sub checksum_query {
   my ( $dbh, $db, $tbl ) = @_;
   my $ddl = ($dbh->selectrow_array("SHOW CREATE TABLE `$db`.`$tbl`"))[1];
   return {engine => 'VIEW'} if $ddl =~ m/^CREATE ALGORITHM/;

   my ( $engine ) = $ddl =~ m/^\) (?:ENGINE|TYPE)=(\S+)/m;

   # Make a list of all columns.
   my @defs = $ddl =~ m/^(\s+`.*)$/gm;
   my %def_for;
   my @tocrc;
   my %type_for;
   my $chunk_exact = 0;

   # Normalize query results: make sure floats stringify the same way, and
   # disable different formatting for TIMESTAMP columns.  Grab column types too.
   foreach my $def ( @defs ) {
      my ($name, $type) = $def =~ m/(`[^`]+`)\s([a-z]+)/;
      # Check $name BEFORE using it as a hash key: under FATAL warnings an
      # undef key would die with "uninitialized value" instead of this
      # intentional, more helpful message.
      die "Can't parse column definition $def" unless $name;
      $type_for{$name} = $type;
      $def_for{$name}  = $def;
      push @tocrc,
           $type =~ m/float|double/ && $opts{'float-precision'} ? "ROUND($name, $opts{'float-precision'})"
         : $type eq 'timestamp'                                 ? "$name + 0"
         :                                                        $name;
   }

   # See if there's an index that will support chunking.  If chunksize-exact
   # is specified, it must be single column unique or primary.
   my $chunk_col = $opts{chunkcol} || '';
   if ( $opts{C} && !$chunk_col ) {

      my @candidates;

      if ($opts{'chunksize-exact'}) {
         # Extract only single-column unique indexes.
         @candidates  = $ddl =~ m/(?:PRIMARY|UNIQUE) KEY .*?\((`[^`]+`)\)/g;
         $chunk_exact = scalar @candidates;
      }

      # Even if chunksize-exact is specified, fall back to non-exact.  Better
      # to go a little over spec than do the whole table in one chunk!
      if ( !@candidates ) {
         # We chunk only on the first column of multi-column indexes, so this
         # regex extracts the first column of every index.  The optional
         # PRIMARY or UNIQUE simply eliminates RTREE and HASH indexes, which
         # are not usable for range queries.
         @candidates = $ddl =~ m/^ *(?:PRIMARY|UNIQUE)? *KEY .*?\((`[^`]+`)/mg;
      }

      # Eliminate types for which I can't choose endpoints.
      @candidates = grep {
         exists($int_types{$type_for{$_}})
         || exists($real_types{$type_for{$_}})
      } @candidates;

      if ( @candidates ) {
         $chunk_col = $candidates[0];
      }
   }

   # To detect when columns are NULL, make a bitmap of nullable columns.
   # This is necessary because CONCAT_WS() skips NULL columns.
   my @nulls = map { $_ =~ m/(`[^`]+`)/g } grep { $_ !~ m/NOT NULL/ } @defs;
   if ( @nulls ) {
      push @tocrc, "CONCAT(" . join(', ', map { "ISNULL($_)" } @nulls) . ")";
   }

   # Is the chunk column nullable?  What is the type of the chunk column?
   my ($chunk_null, $null_query, $chunk_type);
   if ( $chunk_col ) {
      $chunk_null = grep { $_ eq $chunk_col } @nulls;
      $chunk_type = $type_for{$chunk_col};
   }

   # Design the column checksum expression.
   my $chks = @tocrc > 1
            ? "$func(CONCAT_WS('$opts{s}', " . join(',', @tocrc) . '))'
            : "$func($tocrc[0])";

   # Make the query.
   my $query = '';
   if ( $strat eq 'BIT_XOR' ) {

      # This checksum algorithm concatenates the columns in each row and
      # checksums them, then slices this checksum up into 16-character chunks.
      # It then converts them BIGINTs with the CONV() function, and then
      # groupwise XORs them to produce an order-independent checksum of the
      # slice over all the rows.  It then converts these back to base 16 and
      # puts them back together.  The effect is the same as XORing a very wide
      # (32 characters = 128 bits for MD5, and SHA1 is even larger) unsigned
      # integer over all the rows.

      my $slices = make_slices($chks);

      if ( $opts{R} ) {
         # --replicate: write results into the checksum table on the master
         # so the query replicates to the slaves.
         $query = "REPLACE /*progress_comment*/ INTO $opts{R}"
            . "(db, tbl, chunk, boundaries, this_cnt, this_crc) "
            . "SELECT '$db', '$tbl', ?, ?, COUNT(*), CONCAT($slices) AS crc "
            . "FROM `$db`.`$tbl`$opts{W}";
      }
      else {
         $query = "SELECT /*progress_comment*/ COUNT(*) as cnt, CONCAT($slices) AS crc FROM `$db`.`$tbl`$opts{W}";
      }

   }
   else { # Use an accumulator variable.
      # Find whether there's a PK (for order-by).  Since the accumulator
      # variable re-checksums every row combined with the previous row's
      # checksum, row order matters.
      my $index = $opts{i}                           ? " USE INDEX(`$opts{i}`)"
                : $ddl =~ m/PRIMARY KEY\s*\((.*?)\)/ ? ' USE INDEX(PRIMARY)'
                : '';

      # Generate the query.  This query relies on @crc being NULL, and @cnt
      # being 0 when it begins.  It checksums each row, appends it to the
      # running checksum, and checksums the two together.  In this way it acts
      # as an accumulator for all the rows.  It then prepends a steadily
      # increasing number to the left, left-padded with zeroes, so each
      # checksum taken is stringwise greater than the last.  In this way the
      # MAX() function can be used to return the last checksum calculated.
      # @cnt is not used for a row count, it is only used to make MAX() work
      # correctly.
      if ( $opts{R} ) {
         $query = "REPLACE /*progress_comment*/ INTO $opts{R}"
            . "(db, tbl, chunk, boundaries, this_cnt, this_crc) "
            . "SELECT '$db', '$tbl', ?, ?, COUNT(*) AS cnt, RIGHT(MAX("
            . "\@crc := CONCAT(LPAD(\@cnt := \@cnt + 1, 16, '0'), $func(CONCAT_WS('$opts{s}', \@crc, $chks)))"
            . "), $crc_wid) AS crc FROM `$db`.`$tbl`$index$opts{W}";
      }
      else {
         $query = "SELECT /*progress_comment*/ COUNT(*) AS cnt, RIGHT(MAX("
            . "\@crc := CONCAT(LPAD(\@cnt := \@cnt + 1, 16, '0'), $func(CONCAT_WS('$opts{s}', \@crc, $chks)))"
            . "), $crc_wid) AS crc FROM `$db`.`$tbl`$index$opts{W}";
      }
   }

   return {
       database    => $db,
       table       => $tbl,
       engine      => $engine,
       query       => $query,
       chunk_col   => $chunk_col,
       chunk_null  => $chunk_null,
       chunk_type  => $chunk_type,
       chunk_exact => $chunk_exact,
   };
}

# The WHERE clauses are tricky.  There are several cases:
# Normal:      WHERE foo >= ? AND foo < ?
# NULL slice:  WHERE foo IS NULL
# Edge cases:  WHERE foo < ?
#              WHERE foo >= ?
# Builds the WHERE clause for one chunk (see the case list above).  The
# unused $is_first variable has been removed: it read $info->{chunk_cur},
# which can be unset for un-chunked tables, and any resulting
# "uninitialized value" warning is fatal under this file's FATAL warnings.
sub make_where_clause {
   my ( $dbh, $info, $chunk ) = @_;
   my $col = $info->{chunk_col};

   # No chunking column: the whole table is a single chunk.
   return '1=1' if !$col;

   # An undef chunk is the special NULL slice.
   if ( !defined $chunk ) {
      return "$col IS NULL";
   }

   # A non-reference chunk means "all rows where the column is not NULL".
   if ( !ref $chunk ) {
      return "$col IS NOT NULL";
   }

   my ( $beg, $end ) = @$chunk;

   # The normal case is a chunk in the middle of the range somewhere.
   if ( defined $beg && defined $end ) {
      return "$col >= " . $dbh->quote($beg) . " AND $col < " . $dbh->quote($end);
   }
   # Edge cases: open-ended at one end of the range.
   if ( defined $beg ) {
      return "$col >= " . $dbh->quote($beg);
   }
   if ( defined $end ) {
      return "$col < " . $dbh->quote($end);
   }
   die "This code should never run";
}

# Splits the wide hex checksum held in @crc into 16-hex-digit slices (each
# fits a BIGINT), BIT_XORs each slice groupwise, and returns the slice
# expressions joined with commas, ready to be wrapped in CONCAT().
sub make_slices {
   my ( $chks ) = @_;

   # Split the CRC result up into slices and glue them together.
   my @slices;
   for ( my $start = 1; $start < $crc_wid; $start += 16 ) {
      my $len = min(16, $crc_wid - $start + 1);
      push @slices,
         "LPAD(CONV(BIT_XOR("
         . "CAST(CONV(SUBSTRING(\@crc, $start, $len), 16, 10) AS UNSIGNED))"
         . ", 10, 16), $len, '0')";
   }

   if ( $crc_slice < @slices && $opts{o} ) {
      # --optxor: assign @crc := <checksum expr> in exactly one slice so the
      # hash function runs once per row; the other slices reuse @crc.
      $slices[$crc_slice] =~ s/\@crc/\@crc := $chks/;
   }
   else {
      # No optimization: inline the full checksum expression in every slice.
      # (A for loop, not map in void context, for the side-effecting s///.)
      s/\@crc/$chks/ for @slices;
   }

   return join(', ', @slices);
}

# End of the main program: propagate overall success/failure to the shell.
exit $exit_status;

# ############################################################################
# Subroutines
# ############################################################################

# Parses one host definition from the command line into a hashref of
# connection attributes (keys of %conn).  A bare word is a hostname; a
# string containing '=' is parsed as comma-separated key=value pairs.
# Missing values fall back to $prev (the first host definition), then to
# the global command-line options.  Dies if no host part results.
sub parse_dsn {
   my ($dsn, $prev) = @_;
   return unless $dsn;
   $prev ||= {};

   my %vals;
   if ( $dsn =~ m/=/ ) {
      # key=value,key=value form; keep only the keys we know about.
      my %hash = map { m/^(.)=(.*)$/g } split(/,/, $dsn);
      %vals = map  { $_ => $hash{$_} } keys %conn;
   }
   else {
      $vals{h} = $dsn;
   }
   # Fill in defaults, highest precedence first: the definition itself,
   # then the first host's values, then the command-line options.  (A for
   # loop, not map in void context, since this is purely for side effects.)
   for my $key ( keys %conn ) {
      $vals{$key} ||= $prev->{$key} || $opts{$key};
   }
   die "Missing host (h) part in $dsn\n" unless $vals{h};
   return \%vals;
}

# Range functions.
# Computes numeric chunk endpoints: [ $lo, $lo + $step ), capped at $hi.
# The $handle argument is unused; it keeps the signature uniform with the
# other range_* functions, which need a server round-trip.
sub range_num {
   my ( $handle, $lo, $step, $hi ) = @_;
   my $upper = $lo + $step;
   $upper = $hi if $hi < $upper;
   return ( $lo, $upper );
}

# Computes TIME-typed chunk endpoints by letting the server convert the
# numeric endpoints (seconds) back to TIME values.
sub range_time {
   my ( $handle, $lo, $step, $hi ) = @_;
   my $query = "SELECT SEC_TO_TIME($lo), SEC_TO_TIME(LEAST($hi, $lo + $step))";
   return $handle->selectrow_array($query);
}

# Computes DATE-typed chunk endpoints by letting the server convert the
# numeric endpoints (day numbers) back to DATE values.
sub range_date {
   my ( $handle, $lo, $step, $hi ) = @_;
   my $query = "SELECT FROM_DAYS($lo), FROM_DAYS(LEAST($hi, $lo + $step))";
   return $handle->selectrow_array($query);
}

# Computes DATETIME-typed chunk endpoints.  Endpoints are seconds relative
# to $EPOCH; the server converts them back to DATETIME values.
sub range_datetime {
   my ( $handle, $lo, $step, $hi ) = @_;
   my $query = "SELECT DATE_ADD('$EPOCH', INTERVAL $lo SECOND),
       DATE_ADD('$EPOCH', INTERVAL LEAST($hi, $lo + $step) SECOND)";
   return $handle->selectrow_array($query);
}

# Computes TIMESTAMP-typed chunk endpoints by letting the server convert
# the numeric endpoints (Unix timestamps) back to TIMESTAMP values.
sub range_timestamp {
   my ( $handle, $lo, $step, $hi ) = @_;
   my $query = "SELECT FROM_UNIXTIME($lo), FROM_UNIXTIME(LEAST($hi, $lo + $step))";
   return $handle->selectrow_array($query);
}

# Returns the number of seconds between $EPOCH and the value, according to
# the MySQL server.  (The server can do no wrong).  I believe this code is right
# after looking at the source of sql/time.cc but I am paranoid and add in an
# extra check just to make sure.  Earlier versions overflow on large interval
# values, such as on 3.23.58, '1970-01-01' - interval 58000000000 second is
# 2037-06-25 11:29:04.  I know of no workaround.
sub timestampdiff {
   my ( $dbh, $time ) = @_;
   # Seconds between $EPOCH and $time, computed entirely server-side so the
   # server's own calendar arithmetic is used.
   my ( $diff ) = $dbh->selectrow_array(
      "SELECT (TO_DAYS('$time') * 86400 + TIME_TO_SEC('$time')) "
      . "- TO_DAYS('$EPOCH 00:00:00') * 86400");
   # Round-trip the interval to catch servers that overflow on large
   # INTERVAL values (see the comment above this sub).
   my ( $check ) = $dbh->selectrow_array(
      "SELECT DATE_ADD('$EPOCH', INTERVAL $diff SECOND)");
   die <<"   EOF"
   Incorrect datetime math: given $time, calculated $diff but checked to $check.
   This is probably because you are using a version of MySQL that overflows on
   large interval values.  If not, please report this as a bug.
   EOF
      unless $check eq $time;
   return $diff;
}

# Opens a DBI connection to the host described by $info.  Prompts for a
# password first when --askpass is given and none was supplied.  $db, if
# given, overrides the default database (the D part) for the connection.
sub get_dbh {
   my ( $info, $db ) = @_;

   if ( $opts{askpass} && !defined $info->{p} ) {
      print "Enter password for $info->{h}: ";
      ReadMode('noecho');   # don't echo the password to the terminal
      chomp($info->{p} = <STDIN>);
      ReadMode('normal');
      print "\n";
   }

   $db ||= $info->{D} || '';
   # NOTE(review): 'h' appears both in the explicit host= part and in the
   # qw() list below, so the host attribute is emitted twice in the DSN --
   # presumably harmless if %conn maps h to 'host' (same value both times),
   # but worth confirming against %conn's definition.
   my $dsn = "DBI:mysql:$db;host=$info->{h};"
      . join(';', map  { "$conn{$_}=$info->{$_}" } grep { defined $info->{$_} } qw(F h P S))
      . ';mysql_read_default_group=mysql';
   return DBI->connect($dsn, @{$info}{qw(u p)}, $db_options);
}

# Runs the ACCUM/BIT_XOR checksum query for the current chunk of the table
# described by $info, or prints the query and WHERE clause when --explain
# is in effect.  Returns (row count, checksum) on success.
sub do_var_crc {
   my ( $dbh, $info, $explain ) = @_;

   my $sql   = add_progress_comment($info->{query}, $info);
   # chunk_cur is 1-indexed; look up the current chunk's endpoints.
   my $chunk = $info->{chunks}->[$info->{chunk_cur} - 1];
   # NOTE(review): the clause is built via $main_dbh (for quote()), not the
   # per-host $dbh -- presumably so every host gets an identical clause.
   my $where = make_where_clause($main_dbh, $info, $chunk);

   if ( $opts{explain} ) {
      # Print the full query only once (on the first chunk), then one WHERE
      # clause per chunk.
      if ( $info->{chunk_cur} == 1 ) {
         printf($explain, @{$info}{qw(database table)}, $sql);
      }
      printf($explain, @{$info}{qw(database table)}, $where);
      return;
   }

   # The accumulator query relies on @crc being NULL and @cnt being 0.
   $dbh->do('set @crc := NULL, @cnt := 0');
   # The chunk condition is ANDed onto the stored query; this assumes
   # $opts{W} always put a WHERE clause at the end of it -- TODO confirm
   # what $opts{W} contains when --where was not given.
   my $res = $dbh->selectall_arrayref("$sql AND $where", { Slice => {} })->[0];
   return ($res->{cnt}, $res->{crc});
}

# Runs CHECKSUM TABLE for the table described by $info and returns the
# Checksum value, or prints the statement (via the $explain printf format)
# when --explain is in effect.
sub do_checksum {
   my ( $dbh, $info, $explain ) = @_;
   my $stmt = "CHECKSUM TABLE `$info->{database}`.`$info->{table}`";
   if ( $opts{explain} ) {
      printf($explain, @{$info}{qw(database table)}, $stmt);
   }
   else {
      my $row = $dbh->selectrow_hashref($stmt);
      return $row->{Checksum} if $row;
   }
}

# Counts the rows of the table described by $info (honoring the global
# --where clause in $opts{W}), or prints the query when --explain is in
# effect.
sub do_count {
   my ( $dbh, $info, $explain ) = @_;
   my $stmt = "SELECT COUNT(*) FROM `$info->{database}`.`$info->{table}`$opts{W}";
   if ( $opts{explain} ) {
      printf($explain, @{$info}{qw(database table)}, $stmt);
   }
   else {
      # Single row, single column: the count itself.
      return $dbh->selectall_arrayref($stmt)->[0]->[0];
   }
}

# Returns the arguments with duplicates removed, preserving first-seen order.
sub unique {
   my %seen;
   my @distinct;
   for my $item ( @_ ) {
      push @distinct, $item unless $seen{$item}++;
   }
   return @distinct;
}

# Compares versions like 5.0.27 and 4.1.15-standard-log
# Compares versions like 5.0.27 and 4.1.15-standard-log numerically, part
# by part.  Returns true if the server's version is >= $target.
sub version_ge {
   my ( $dbh, $target ) = @_;
   return _version_num($dbh->{mysql_serverinfo}) ge _version_num($target);
}

# Normalizes a version string to a fixed-width, stringwise-comparable form.
# Missing parts (e.g. '5.0') are padded with zeroes; the original passed
# fewer than three values to sprintf, which dies with an uninitialized-value
# error under this file's FATAL warnings.  Extra parts are ignored.
sub _version_num {
   my ( $version ) = @_;
   my @parts = ( $version =~ m/(\d+)/g, 0, 0, 0 );
   return sprintf('%03d%03d%03d', @parts[0 .. 2]);
}

# Tries to extract the MySQL error message and print it
# Extracts the useful part of a DBI/MySQL error message and prints it to
# STDERR, tagged with the database.table being processed when known.
# Under --quiet, dropped-table and deadlock errors are suppressed entirely.
sub print_err {
   my ( $msg, $database, $table ) = @_;
   return if !defined $msg;
   # Honor --quiet in the (common?) event of dropped tables or deadlocks.
   return if $opts{q} && $EVAL_ERROR =~ m/: Table .*? doesn't exist|Deadlock found/;
   # Reduce DBI's "... failed: <error> at <file> line <n>" wrapper to just
   # the error text and line number, then collapse whitespace.
   $msg =~ s/^.*?failed: (.*?) at \S+ line (\d+).*$/$1 at line $2/s;
   $msg =~ s/\s+/ /g;
   $msg .= " while doing $database.$table" if $database && $table;
   print STDERR $msg, "\n";
}

# ############################################################################
# Documentation
# ############################################################################
=pod

=head1 NAME

mysql-table-checksum - Perform an online replication consistency check, or
efficiently checksum MySQL tables on one or many servers.

=head1 SYNOPSIS

   mysql-table-checksum --replicate=mydb.checksum master-host
   ... time passes, replication catches up ...
   mysql-table-checksum --replicate=mydb.checksum --replcheck master-host

Or,

   mysql-table-checksum h=host1,u=user,p=password h=host2 ...

Or,

   mysql-table-checksum host1 host2 ... hostN | mysql-checksum-filter

See L<"SPECIFYING HOSTS"> for more on the syntax of the host arguments.

=head1 OVERVIEW

MySQL Table Checksum generates table checksums for MySQL tables, typically
useful for verifying your slaves are in sync with the master.  The checksums
are generated by a query on the server, and there is virtually no network
traffic as a result.

Checksums typically take about twice as long as COUNT(*) on very large InnoDB
tables in my tests.  For smaller tables, COUNT(*) is a good bit faster than
the checksums.  See L<"--algorithm"> for more details on performance.

If you specify more than one server, MySQL Table Checksum assumes the first
server is the master and others are slaves.  Checksums are parallelized for
speed, forking off a child process for each table.  Duplicate server names are
ignored, but if you want to checksum a server against itself you can use two
different forms of the hostname (for example, "localhost 127.0.0.1", or
"h=localhost,P=3306 h=localhost,P=3307").

MySQL Table Checksum only examines table structure on the first host specified,
so if anything differs on the others, it won't notice.  It ignores views.

The checksums are tested on MySQL version 3.23.58 through 6.0-alpha.

=head1 SPECIFYING HOSTS

MySQL Table Checksum connects to a theoretically unlimited number of MySQL
servers.  You specify a list of one or more host definitions on the command
line, such as "host1 host2".  Each host definition can be just a hostname, or it
can be a complex string that specifies connection options as well.  You can
specify connection options two ways:

=over

=item *

Format a host definition in a key=value,key=value form.  If an argument on the
command line contains the letter '=', MySQL Table Checksum will parse it into
its component parts.  The parts are as follows:

   KEY MEANING
   === =======
   h   Connect to host
   P   Port number to use for connection
   S   Socket file to use for connection
   u   User for login if not current user
   p   Password to use when connecting
   F   Only read default options from the given file

With this in mind, consider that specifying a list of simple host definitions
"host1 host2" is equivalent to the more complicated "h=host1 h=host2" format.

=item *

With the command-line options such as L<"--user"> and L<"--password">.  These
options, if given, apply globally to all host definitions.

=back

In addition to specifying connection options this way, MySQL Table Checksum
allows shortcuts.  Any options specified for the first host definition on the
command line fill in missing values in subsequent ones.  Any options that are
still missing after this are filled in from the command-line options if
possible.

In other words, the places you specify connection options have precedence:
highest precedence is the option specified directly in the host definition, next
is the option specified in the first host definition, and lowest is the
command-line option.

You can freely mix simple and complex host definitions and/or command-line
arguments.  For example, if all your servers except one of your slaves uses a
non-standard port number:

   mysql-table-checksum --port 4500 master h=slave1,P=3306 slave2 slave3

If you are confused about how MySQL Table Checksum will connect to your servers,
give the L<"--explainhosts"> option and it will tell you.

=head1 OPTIONS

Many options are enabled by default and can be disabled by prefixing with --no.

=over

=item --algorithm

Specifies which checksum algorithm to use.  Valid arguments are CHECKSUM,
BIT_XOR and ACCUM.  The latter two do cryptographic hash checksums.

CHECKSUM is built into MySQL, but has some disadvantages.  BIT_XOR and ACCUM are
implemented by the SQL queries mysql-table-checksum generates.  They use a
cryptographic hash of all columns concatenated together with a separator,
followed by a bitmap of each nullable column that is NULL (necessary because
CONCAT_WS() skips NULL columns).

CHECKSUM is the default.  This method uses MySQL's built-in CHECKSUM TABLE
command.  It cannot be used before MySQL 4.1.1, and various options disable it
as well.  It does not simultaneously count rows; that requires an extra COUNT(*)
query.  This is a good option when you are using MyISAM tables with live
checksums enabled; in this case both the COUNT(*) and CHECKSUM queries will run
very quickly.  CHECKSUM TABLE is a little faster than the other two
algorithms, but not very much.

The BIT_XOR algorithm is available for MySQL 4.1.1 and newer.  It uses
BIT_XOR(), which is order-independent, to reduce all the rows to a single
checksum.  It runs within an order of magnitude of COUNT(*) on InnoDB tables;
on large tables it's typically about half as fast as COUNT(*).

ACCUM uses a user variable as an accumulator.  It reduces each row to a single
checksum, which is concatenated with the accumulator and re-checksummed.  This
technique is order-dependent.  If the table has a primary key, it will be used
to order the results for consistency; otherwise it's up to chance.  It tends to
use a little less CPU and run a little faster than the BIT_XOR algorithm.

The ACCUM algorithm has two possible advantages over BIT_XOR: speed (there may
be fewer cryptographic hash operations and it may read less data) and possibly
fewer collisions.  The pathological worst case is where identical rows will
cancel each other out in the BIT_XOR.  In this case you will not be able to
distinguish a table full of one value from a table full of another value.  The
ACCUM algorithm will distinguish them.

However, the ACCUM algorithm is order-dependent, so if you have two tables
with identical data but the rows are out of order, you'll get different
checksums with ACCUM.

Choose your (mild) poison.  Each algorithm is very good in reality.  If a given
algorithm won't work for some reason, mysql-table-checksum falls back to
another.  The least common denominator is ACCUM, which works on MySQL 3.23.2 and
newer.

One reason to specify a cryptographic hash algorithm instead of CHECKSUM is to
checksum tables that have the same data but different row formats (possibly
because of different storage engines), and thus will return different values for
CHECKSUM TABLE.

=item --askpass

Prompt for a password for each host for which no password is given.

=item --chunkcol

Specifies a column for chunking (see L<"--chunksize">).  You should not need to do
this normally, because mysql-table-checksum can find a suitable column if one
exists.  Be careful of using character columns, because mysql-table-checksum's
chunking algorithm works only on numbers.  For instance, if you have a character
column containing values 1 through 90 and you specify a chunksize of 50,
mysql-table-checksum will checksum BETWEEN 1 AND 50, then BETWEEN 51 AND 100.
The second query will not match any rows because 51 is stringwise greater than
100.

=item --chunksize

If you specify a chunk size, mysql-table-checksum will look for an index whose
first column is a numeric or temporal data type.  It will estimate the number of
rows to be checksummed and split them into ranges of approximately L<"--chunksize">
rows, based on the table's index statistics.  It will checksum each range
separately with parameters in the checksum query's WHERE clause.  The WHERE
comparisons will refer to the first column of the chosen index.

If mysql-table-checksum cannot find a suitable index, it will do the entire
table in one chunk as though you had not specified L<"--chunksize"> at all.  Each
table is handled individually, so some tables may be chunked and others not.

The chunks will be approximately sized, and depending on the distribution of
values in the indexed column, some chunks may be larger than the value you
specify.  If it is important for you to avoid this, you may be able to use
L<"--chunksize-exact">.  Otherwise, perhaps you should just specify a smaller size,
such as half the size you really want.

You can override mysql-table-checksum's choice of column with L<"--chunkcol">.

=item --chunksize-exact

If this option is given, mysql-table-checksum will checksum the table in chunks
no larger than L<"--chunksize">.  This requires a single-column integral or date
index.  The index must be a UNIQUE or PRIMARY KEY.

If the table doesn't have a suitable index, mysql-table-checksum will try to
chunk approximately instead of just doing the whole table in one chunk.

=item --count

Count the rows as well as taking their checksum. This is disabled by default to
avoid an extra COUNT(*) query when L<"--algorithm"> is CHECKSUM.  For other
algorithms, you get a count for free.  If you have only MyISAM tables and live
checksums are enabled, both CHECKSUM and COUNT will be very fast, but otherwise
you may want to use one of the other algorithms.

=item --crc

Take the checksum of the rows as well as their count.  This is enabled by
default.  If you disable it, you'll just get COUNT(*) queries.

=item --databases

Only checksum this comma-separated list of databases.

=item --defaults-file

If you specify this option, only this file is read for MySQL default options;
otherwise all the default files will be read.

=item --emptyrepltbl

Issues a DELETE against the table given by L<"--replicate"> before beginning work.
Ignored if L<"--replicate"> is not specified.  This can be useful to remove entries
related to tables that no longer exist, or just to clean out the results of a
previous run.

If you specify L<"--databases"> or L<"--tables">, MySQL Table Checksum will
construct a WHERE clause for the DELETE statement, so only matching rows will
be deleted.

=item --engine

Only checksum tables whose storage engine is in this comma-separated list.
You can use this to restrict the checksum to InnoDB, for example.

=item --explain

Print checksum queries and WHERE clauses (if chunking is enabled) for each
table, but do not execute the queries.

=item --explainhosts

Print out a list of hosts to which MySQL Table Checksum will connect, with all
the various connection options, and exit.  See L<"SPECIFYING HOSTS">.

=item --float-precision

If you specify this option, FLOAT and DOUBLE columns will be rounded to the
specified number of digits after the decimal point for the checksum.  This can
avoid checksum mismatches due to different floating-point representations of
the same values on different MySQL versions and hardware.

=item --function

You can use this option to choose the cryptographic hash function used for
L<"--algorithm">=ACCUM or L<"--algorithm">=BIT_XOR.  The default is to use SHA1, but MD5
is also a good choice.  Whatever function you specify is run in SQL, not in
Perl, so it must be available to MySQL.

=item --help

Displays a help message.

=item --ignoredb

Use this option to skip a comma-separated list of databases.

=item --ignoretbl

Use this option to skip a comma-separated list of tables.

=item --index

If you specify L<"--algorithm">=ACCUM and the table has no PRIMARY KEY, row ordering
will be non-deterministic, and you may get unpredictable results.  If there is
another index that will give predictable results, this option can be used to
specify it.

=item --lock

This option can help you to get a consistent read on a master and many slaves.
If you specify this option, mysql-table-checksum will lock the table on the
first server on the command line, which it assumes to be the master.  It will
keep this lock until the checksums complete on the other servers.

This option isn't very useful by itself, so you probably want to use L<"--wait">
instead.

=item --optxor

This option, which is enabled by default, specifies to use user variables to
reduce the number of times each row must be passed through the cryptographic
hash function when you are using the BIT_XOR algorithm.

With the optimization, the queries look like this in pseudo-code:

  SELECT CONCAT(
     BIT_XOR(SLICE_OF(@user_variable)),
     BIT_XOR(SLICE_OF(@user_variable)),
     ...
     BIT_XOR(SLICE_OF(@user_variable := HASH(col1, col2... colN))));

The exact positioning of user variables and calls to the hash function is
determined dynamically, and will vary between MySQL versions.  Without the
optimization, it looks like this:

  SELECT CONCAT(
     BIT_XOR(SLICE_OF(MD5(col1, col2... colN))),
     BIT_XOR(SLICE_OF(MD5(col1, col2... colN))),
     ...
     BIT_XOR(SLICE_OF(MD5(col1, col2... colN))));

The difference is the number of times all the columns must be mashed together
and fed through the hash function.  If you are checksumming really large
columns, such as BLOB or TEXT columns, this might make a big difference.

=item --password

The password to use when connecting.

=item --port

The port number to use for the connection.

=item --recursecheck

Recursively runs L<"--replcheck"> to check the entire replication subtree
rooted at the given master.

=item --replcheck

Connects to the master and runs SHOW SLAVE HOSTS, then connects to each slave
of the master, runs the query shown in L<"CONSISTENT CHECKSUMS">, and prints
results.  Exits after printing.  This is just a convenient way of running the
query so you don't have to do it manually.

The output is one informational line per slave host, followed by the results
of the query, if any.  If L<"--quiet"> is specified, there is no output.

Requires L<"--replicate"> to be specified so it knows which table to query.
Connection information for each slave is derived from the same
default-and-override method described in L<"SPECIFYING HOSTS">.  The host and
port from SHOW SLAVE HOSTS are combined into "h=host,P=port" and used as the
argument.

This requires the @@SERVER_ID system variable, so it only works on MySQL
3.23.26 or newer.

If any slave has chunks that differ from the master, MySQL Table Checksum's
exit status is 1; otherwise it is 0.

=item --replicate

This option enables a completely different checksum strategy for a consistent,
lock-free checksum across a master and its slaves.  This only works with
statement-based replication (mysql-table-checksum will switch the binlog format
to STATEMENT for the duration of the session if your server uses row-based
replication).  Instead of running the checksum queries on each server, you only
run it on the master.  You specify a table to insert the results into.  The
query will insert directly into the table, so it will be replicated through the
binlog to the slaves.

The argument to the option is the table in which the checksums should be stored.
The table must have at least these columns: db, tbl, chunk, boundaries,
this_crc, master_crc, this_cnt, master_cnt.  Here is a suggested table
structure:

  CREATE TABLE checksum (
     db         char(64)     NOT NULL,
     tbl        char(64)     NOT NULL,
     chunk      int          NOT NULL,
     boundaries char(64)     NOT NULL,
     this_crc   char(40)     NOT NULL,
     this_cnt   int          NOT NULL,
     master_crc char(40)         NULL,
     master_cnt int              NULL,
     ts         timestamp    NOT NULL,
     PRIMARY KEY (db, tbl, chunk)
  );

Be sure to choose an appropriate storage engine for the checksum table.  If you
are checksumming InnoDB tables, for instance, a deadlock will break replication
if the checksum table is non-transactional, because the transaction will still
be written to the binlog.  It will then replay without a deadlock on the
slave and break replication with "different error on master and slave."  This
is not a problem with MySQL Table Checksum, it's a problem with MySQL
replication, and you can read more about it in the MySQL manual.

When the queries are finished replicating, you can run a simple query on each
slave to see which tables have differences from the master.  See L<"CONSISTENT
CHECKSUMS"> for details.  If you find tables that have differences, you can use
the chunk boundaries in a WHERE clause to MySQL Table Sync to help repair
them more efficiently.  See L<mysql-table-sync> for details.

This option eliminates the need to do complicated locking and unlocking, waiting
for master binlog positions, and so on.  It disables L<"--lock">, L<"--wait">,
and L<"--slavelag">.

The checksum queries actually do a REPLACE into this table, so existing rows
need not be removed before running.  However, you may wish to do this anyway
to remove rows related to tables that don't exist anymore.  The L<"--emptyrepltbl">
option does this for you.

Since mysql-table-checksum uses USE to select the table's database as its
default database before executing the checksum query, the checksum queries
should replicate to slaves even if --binlog-do-db settings on the master filter
out the checksum table's database.  For more information on how --binlog-do-db
works, see L<http://dev.mysql.com/doc/en/binary-log.html>.

If the slaves have any --replicate-do-X or replicate-ignore-X options, you
should be careful not to checksum any databases or tables that exist on the
master and not the slaves.  Changes to such tables may not normally be executed
on the slaves because of the --replicate-X options, but the checksum queries
change the checksum table, not the tables they checksum.  Therefore these
queries will be executed on the slave, and if the table or database does not
exist, they will cause replication to fail.  For more information on replication
rules, see L<http://dev.mysql.com/doc/en/replication-rules.html>.

=item --separator

This option controls the separator character used for CONCAT_WS() when taking
row checksums with user-variables.

=item --slavelag

If this option is enabled, the output will show how many seconds behind the
master each slave is.  This can be useful when you want a fast, parallel,
non-blocking checksum, and you know your slaves might lag the master.  You can
inspect the results and make an educated guess whether any discrepancies on the
slave are due to slave lag instead of corrupt data.

=item --sleep

If this option is specified, mysql-table-checksum will sleep the specified
number of seconds between checksums.  That is, it will sleep between every
table, and if you specify L<"--chunksize">, it will also sleep between chunks.

=item --sleep-coef

If this option is specified, mysql-table-checksum will sleep the amount of
time elapsed during the previous checksum, multiplied by the specified
coefficient.  This option is ignored if L<"--sleep"> is specified.

=item --socket

The socket file to use for the connection.

=item --tab

Instead of column-aligned output, print tab-separated output.

=item --tables

Restrict checksums to this comma-separated list of tables.

=item --user

MySQL user account to use for the connection.

=item --verify

This option is enabled by default.  It runs a trivial checksum on all servers to
ensure they have compatible CONCAT_WS() and cryptographic hash functions.

Versions of MySQL before 4.0.14 will skip empty strings and NULLs in
CONCAT_WS, and others will only skip NULLs.  The two kinds of behavior will
produce different results if you have any columns containing the empty string
in your table.  If you know you don't (for instance, all columns are
integers), you can safely disable this check and you will get a reliable
checksum even on servers with different behavior.

This option also checks all servers to be sure the L<"--optxor"> optimization will
work correctly.  If not, it simply disables the optimization, rather than
stopping with an error.

=item --version

Output version information and exit.

=item --wait

This option helps you get a consistent checksum across a master server and its
slaves.  It combines locking and waiting to accomplish this.  First it locks the
table on the master (the first server on the command line).  Then it finds the
master's binlog position and checksums.

The argument to the option is the number of seconds to wait for the slaves to
catch up to the master.  It is actually the argument to MASTER_POS_WAIT().  If
the slaves don't catch up to the master within this time, they will unblock
and go ahead with the checksum.  You can tell whether this happened by
examining the STAT column in the output.

=item --where

You can use this option to limit the checksum to only part of the table.  This
is particularly useful if you have append-only tables and don't want to
constantly re-check all rows; you could run a daily job to just check
yesterday's rows, for instance.

This option is much like the -w option to mysqldump.  Do not specify the WHERE
keyword.  You may need to quote the value.  Here is an example:

  mysql-table-checksum --where "foo=bar"

=back

=head1 CONSISTENT CHECKSUMS

If you are using this tool to verify your slaves still have the same data as the
master, which is why I wrote it, you should read this section.

The best way to do this with replication is to use the L<"--replicate"> option.  When
the queries are finished running on the master and its slaves, you can go to the
slaves and issue SQL queries to see if any tables are different from the master.
Try the following:

  SELECT db, tbl, chunk, this_cnt-master_cnt AS cnt_diff,
     this_crc <> master_crc OR ISNULL(master_crc) <> ISNULL(this_crc) AS crc_diff
  FROM checksum
  WHERE master_cnt <> this_cnt OR master_crc <> this_crc
     OR ISNULL(master_crc) <> ISNULL(this_crc);

The L<"--replcheck"> option can do this query for you.  If you can't use this
method, try the following:

=over

=item *

If your servers are not being written to, you can just run the tool with no
further ado:

  mysql-table-checksum server1 server2 ... serverN

=item *

If the servers are being written to, you need some way to make sure they are
consistent at the moment you run the checksums.  For situations other than
master-slave replication, you will have to figure this out yourself.  You may
be able to use the L<"--where"> option with a date or time column to only checksum
data that's not recent.

=item *

If you are checksumming a master and slaves, you can do a fast parallel
checksum and assume the slaves are caught up to the master.  In practice, this
tends to work well except for tables which are constantly updated.  You can
use the L<"--slavelag"> option to see how far behind each slave was when it
checksummed a given table.  This can help you decide whether to investigate
further.

=item *

The next most disruptive technique is to lock the table on the master, then take
checksums.  This should prevent changes from propagating to the slaves.  You can
just lock on the master (with L<"--lock">), or you can both lock on the master and wait
on the slaves till they reach that point in the master's binlog (L<"--wait">).  Which is
better depends on your workload; only you know that.

=item *

If you decide to make the checksums on the slaves wait until they're guaranteed
to be caught up to the master, the algorithm looks like this:

 For each table,
   Master: lock table
   Master: get pos
   In parallel,
     Master: checksum
     Slave(s): wait for pos, then checksum
   End
   Master: unlock table
 End

=back

What I typically do when I'm not using the L<"--replicate"> option is simply run the
tool on all servers with no further options.  This runs fast, parallel,
non-blocking checksums simultaneously.  If there are tables that look different,
I re-run with L<"--wait">=600 on the tables in question.  This makes the tool lock on
the master as explained above.

=head1 OUTPUT

Output is to STDOUT, one line per server and table, with header lines for each
database.  I tried to make the output easy to process with awk.  For this reason
columns are always present.  If there's no value, mysql-table-checksum prints 'NULL'.

The default is column-aligned output for human readability, but you can change
it to tab-separated if you want.  Use the L<"--tab"> option for this.

Output is unsorted, though all lines for one table should be output together.
For speed, all checksums are done in parallel (as much as possible) and may
complete out of the order in which they were started.  You might want to run
them through another script or command-line utility to make sure they are in the
order you want.  If you pipe the output through mysql-checksum-filter, you can
sort the output and/or avoid seeing output about tables that have no differences.

The columns in the output are as follows.  The database, table, and chunk come
first so you can sort by them easily (they are the "primary key").

Output from L<"--replcheck"> is different.

=over

=item DATABASE

The database the table is in.

=item TABLE

The table name.

=item CHUNK

The chunk (see L<"--chunksize">).  Zero if you are not doing chunked checksums.

=item HOST

The server's hostname.

=item ENGINE

The table's storage engine.

=item COUNT

The table's row count, unless you specified to skip it.

=item CHECKSUM

The table's checksum, unless you specified to skip it or the table has no rows.
Some types of checksums will be 0 if there are no rows; others will print NULL.

=item TIME

The time the actual checksum and/or counting took.

=item WAIT

How long the checksum blocked before beginning.

=item STAT

The return value of MASTER_POS_WAIT().

=item LAG

How far the slave lags the master, as reported by SHOW SLAVE STATUS.

=back

=head1 EXIT STATUS

A successful exit status is 0.  If there is an error checksumming any table,
the exit status is 1.

When running L<"--replcheck">, if any slave has chunks that differ from the
master, the exit status is 1.

=head1 QUERIES

If you are using innotop (see L<http://innotop.sourceforge.net/>),
mytop, or another tool to watch currently running MySQL queries, you may see
the checksum queries.  They look similar to this:

  REPLACE /*test.test_tbl:'2'/'5'*/ INTO test.checksum(db, ...

Since mysql-table-checksum's queries run for a long time and tend to be
textually very long, and thus won't fit on one screen of these monitoring
tools, I've been careful to place a comment at the beginning of the query so
you can see what it is and what it's doing.  The comment contains the name of
the table that's being checksummed, the chunk it is currently checksumming,
and how many chunks will be checksummed.  In the case above, it is
checksumming chunk 2 of 5 in table test.test_tbl.

=head1 SEE ALSO

See also L<mysql-checksum-filter> and L<mysql-table-sync>.

=head1 BUGS

Please use the Sourceforge bug tracker, forums, and mailing lists to request
support or report bugs: L<http://sourceforge.net/projects/mysqltoolkit/>.

=head1 SYSTEM REQUIREMENTS

You need Perl, DBI, DBD::mysql, and some core packages that ought to be
installed in any reasonably new version of Perl.

=head1 AUTHOR

Baron "Xaprb" Schwartz.

=head1 ACKNOWLEDGEMENTS

This is an incomplete list.  My apologies for omissions or misspellings.

Claus Jeppesen,
Francois Saint-Jacques,
Giuseppe Maxia,
Heikki Tuuri,
James Briggs,
Martin Friebe,
Sergey Zhuravlev.

=head1 COPYRIGHT, LICENSE AND WARRANTY

This program is copyright (c) 2007 Baron Schwartz.
Feedback and improvements are welcome.

THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, version 2; OR the Perl Artistic License.  On UNIX and similar
systems, you can issue `man perlgpl' or `man perlartistic' to read these
licenses.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA  02111-1307  USA.

=head1 VERSION

This manual page documents Ver 1.1.14 Distrib 848 $Revision: 839 $.

=cut
