@recaptime-dev's working patches and fork for Phorge, a community fork of Phabricator. (Upstream dev and stable branches are at upstream/main and upstream/stable, respectively.) hq.recaptime.dev/wiki/Phorge

Dump tables one at a time, rather than all at once

Summary:
Ref T13000. This allows us to be more selective about which tables we dump data for, to reduce the size of backups and exports. The immediate goal is to make large `ngrams` tables more manageable in the cluster, but this generally makes all backups and exports faster and easier.

Here, tables are dumped one at a time. A followup change will sometimes add the `--no-data` flag, to skip dumping readthrough caches and (optionally) rebuildable indexes.
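
For example, a schema-only dump of a hypothetical cache table under that followup would look like this (`--no-data` is standard `mysqldump` syntax):

```
mysqldump --no-data -- phabricator_cache cache_general
```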

Test Plan: Compared a dump from `master` and from this branch, found them to be essentially identical. The new dump has a little more header information in each section. Verified each contains the same number of `CREATE TABLE` statements.
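
One way to run that comparison (the dump file names here are hypothetical):

```
grep -c 'CREATE TABLE' dump-master.sql
grep -c 'CREATE TABLE' dump-branch.sql
```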

Reviewers: amckinley

Reviewed By: amckinley

Maniphest Tasks: T13000

Differential Revision: https://secure.phabricator.com/D18679

+74 -42
src/infrastructure/storage/management/workflow/PhabricatorStorageManagementDumpWorkflow.php
···
       return 1;
     }

-    $databases = $api->getDatabaseList($patches, true);
+    $ref = $api->getRef();
+    $ref_key = $ref->getRefKey();
+
+    $schemata_map = id(new PhabricatorConfigSchemaQuery())
+      ->setAPIs(array($api))
+      ->setRefs(array($ref))
+      ->loadActualSchemata();
+    $schemata = $schemata_map[$ref_key];
+
+    $targets = array();
+    foreach ($schemata->getDatabases() as $database_name => $database) {
+      foreach ($database->getTables() as $table_name => $table) {
+        $targets[] = array(
+          'database' => $database_name,
+          'table' => $table_name,
+        );
+      }
+    }

     list($host, $port) = $this->getBareHostAndPort($api->getHost());

···
       $argv[] = $port;
     }

-    $argv[] = '--databases';
-    foreach ($databases as $database) {
-      $argv[] = $database;
-    }
+    $commands = array();
+    foreach ($targets as $target) {
+      $target_argv = $argv;

+      if ($has_password) {
+        $command = csprintf(
+          'mysqldump -p%P %Ls -- %R %R',
+          $password,
+          $target_argv,
+          $target['database'],
+          $target['table']);
+      } else {
+        $command = csprintf(
+          'mysqldump %Ls -- %R %R',
+          $target_argv,
+          $target['database'],
+          $target['table']);
+      }

-    if ($has_password) {
-      $command = csprintf('mysqldump -p%P %Ls', $password, $argv);
-    } else {
-      $command = csprintf('mysqldump %Ls', $argv);
+      $commands[] = $command;
     }
+

     // Decrease the CPU priority of this process so it doesn't contend with
     // other more important things.
···
       proc_nice(19);
     }

-
-    // If we aren't writing to a file, just passthru the command.
-    if ($output_file === null) {
-      return phutil_passthru('%C', $command);
-    }
-
     // If we are writing to a file, stream the command output to disk. This
     // mode makes sure the whole command fails if there's an error (commonly,
     // a full disk). See T6996 for discussion.

-    if ($is_compress) {
+    if ($output_file === null) {
+      $file = null;
+    } else if ($is_compress) {
       $file = gzopen($output_file, 'wb1');
     } else {
       $file = fopen($output_file, 'wb');
···
           'Failed to open file "%s" for writing.',
           $file));
     }
-
-    $future = new ExecFuture('%C', $command);

     try {
-      $iterator = id(new FutureIterator(array($future)))
-        ->setUpdateInterval(0.100);
-      foreach ($iterator as $ready) {
-        list($stdout, $stderr) = $future->read();
-        $future->discardBuffers();
+      foreach ($commands as $command) {
+        $future = new ExecFuture('%C', $command);

-        if (strlen($stderr)) {
-          fwrite(STDERR, $stderr);
-        }
+        $iterator = id(new FutureIterator(array($future)))
+          ->setUpdateInterval(0.100);
+        foreach ($iterator as $ready) {
+          list($stdout, $stderr) = $future->read();
+          $future->discardBuffers();

-        if (strlen($stdout)) {
-          if ($is_compress) {
-            $ok = gzwrite($file, $stdout);
-          } else {
-            $ok = fwrite($file, $stdout);
+          if (strlen($stderr)) {
+            fwrite(STDERR, $stderr);
           }

-          if ($ok !== strlen($stdout)) {
-            throw new Exception(
-              pht(
-                'Failed to write %d byte(s) to file "%s".',
-                new PhutilNumber(strlen($stdout)),
-                $output_file));
+          if (strlen($stdout)) {
+            if (!$file) {
+              $ok = fwrite(STDOUT, $stdout);
+            } else if ($is_compress) {
+              $ok = gzwrite($file, $stdout);
+            } else {
+              $ok = fwrite($file, $stdout);
+            }
+
+            if ($ok !== strlen($stdout)) {
+              throw new Exception(
+                pht(
+                  'Failed to write %d byte(s) to file "%s".',
+                  new PhutilNumber(strlen($stdout)),
+                  $output_file));
+            }
           }
-        }

-        if ($ready !== null) {
-          $ready->resolvex();
+          if ($ready !== null) {
+            $ready->resolvex();
+          }
         }
       }

-      if ($is_compress) {
+      if (!$file) {
+        $ok = true;
+      } else if ($is_compress) {
         $ok = gzclose($file);
       } else {
         $ok = fclose($file);
···
       // we don't leave any confusing artifacts laying around.

       try {
-        Filesystem::remove($output_file);
+        if ($file !== null) {
+          Filesystem::remove($output_file);
+        }
       } catch (Exception $ex) {
         // Ignore any errors we hit.
       }
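
The patch's streaming logic depends on libphutil (`ExecFuture`, `FutureIterator`, `csprintf`). As a rough standalone sketch of the same pattern, one dump process per table, output streamed to a compressed file, and a hard failure on short writes, consider the plain PHP below. It is not the patch's code; it assumes `mysqldump` is on `PATH` with credentials supplied externally (for example via `~/.my.cnf`), and all names in it are hypothetical.

```php
<?php

// Hypothetical targets; the real workflow enumerates these from the
// live schemata via PhabricatorConfigSchemaQuery.
$targets = array(
  array('database' => 'demo_db', 'table' => 'demo_table'),
);

$file = gzopen('backup.sql.gz', 'wb1');
if ($file === false) {
  throw new Exception('Failed to open output file for writing.');
}

foreach ($targets as $target) {
  // One mysqldump process per table, as in the patch. "--" ends the
  // option list; the positional arguments are <database> <table>.
  $command = sprintf(
    'mysqldump -- %s %s',
    escapeshellarg($target['database']),
    escapeshellarg($target['table']));

  $pipes = array();
  $proc = proc_open(
    $command,
    array(1 => array('pipe', 'w'), 2 => STDERR),
    $pipes);
  if (!is_resource($proc)) {
    throw new Exception('Failed to launch mysqldump.');
  }

  // Stream stdout to the compressed file in chunks so the dump is
  // never held in memory all at once.
  while (!feof($pipes[1])) {
    $stdout = fread($pipes[1], 1024 * 1024);
    if ($stdout === false) {
      break;
    }
    if (strlen($stdout)) {
      $ok = gzwrite($file, $stdout);
      if ($ok !== strlen($stdout)) {
        // A short write usually means a full disk; abort loudly rather
        // than leave a silently truncated backup (the T6996 concern).
        throw new Exception('Failed to write dump output to disk.');
      }
    }
  }

  fclose($pipes[1]);
  if (proc_close($proc) !== 0) {
    throw new Exception('mysqldump exited with an error.');
  }
}

if (!gzclose($file)) {
  throw new Exception('Failed to close output file.');
}
```

The design point carried over from the diff is that every write is checked against the number of bytes requested, so a full disk fails the whole dump instead of producing a plausible-looking but incomplete backup.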