src/jdbcpgbackup/ZipBackup.java @ 0:7ecd1a4ef557 (added)

add content
author Franklin Schmidt <fschmidt@gmail.com>
date Thu, 21 Mar 2019 19:15:52 -0600
/* Copyright (c) 2012 Tomislav Gountchev <tomi@gountchev.net> */

package jdbcpgbackup;

import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import java.util.zip.ZipOutputStream;

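/**
 * Dumps PostgreSQL schemas to a zip archive and restores them from it.
 * The archive layout, as produced by the dump methods below, is:
 *
 *   pg_backup/schemas.sql
 *   pg_backup/schemas/&lt;schema&gt;/sequences.sql
 *   pg_backup/schemas/&lt;schema&gt;/tables.sql
 *   pg_backup/schemas/&lt;schema&gt;/tables/&lt;table&gt;    (table data)
 *   pg_backup/schemas/&lt;schema&gt;/views.sql
 *   pg_backup/schemas/&lt;schema&gt;/indexes.sql
 *   pg_backup/schemas/&lt;schema&gt;/constraints.sql
 *
 * A minimal usage sketch (file name and connection URL are illustrative):
 *
 * <pre>
 *   ZipBackup backup = new ZipBackup(new File("mydb.zip"),
 *       "jdbc:postgresql://localhost:5432/mydb?user=postgres");
 *   backup.dumpAll(DataFilter.NO_DATA);        // schema definitions only
 *   backup.restoreSchemaTo("public", "public_copy");
 * </pre>
 */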
public final class ZipBackup {

	private static final String zipRoot = "pg_backup/";
	public static final int DEFAULT_BATCH_SIZE = 10000;

	private final String jdbcUrl;
	private final File file;

	private DBOFactory<Schema> schemaFactory = new Schema.SchemaFactory();
	private DBOFactory<View> viewFactory = new View.ViewFactory();
	private DBOFactory<Table> tableFactory = new Table.TableFactory();
	private DBOFactory<Sequence> sequenceFactory = new Sequence.SequenceFactory();
	private DBOFactory<Index> indexFactory = new Index.IndexFactory();
	private DBOFactory<Constraint> constraintFactory = new Constraint.ConstraintFactory();

	public ZipBackup(File file, String jdbcUrl) {
		this.file = file;
		this.jdbcUrl = jdbcUrl;
	}

	public ZipBackup(Map<String,String> params) {
		this(params.get("filename") == null ? null : new File(params.get("filename")),
				buildJdbcUrl(params));
	}

	public void dumpAll(DataFilter dataFilter) {
		dumpAll(dataFilter, DEFAULT_BATCH_SIZE);
	}

	public void dumpAll(DataFilter dataFilter, int batchSize) {
		debug("starting full dump at " + new Date());
		Schema.CachingSchemaFactory cachingSchemaFactory = new Schema.CachingSchemaFactory();
		schemaFactory = cachingSchemaFactory;
		Connection con = null;
		ZipOutputStream zos = null;
		try {
			zos = getZipOutputStream();
			con = DriverManager.getConnection(jdbcUrl);
			con.setReadOnly(true);
			con.setAutoCommit(true);
			timerStart("schemas");
			Collection<Schema> schemas = cachingSchemaFactory.getDbBackupObjects(con, null);
			setTotalCount(schemas.size());
			dumpSchemasSql(schemas, dataFilter, con, zos);
			debug(schemas.size() + " schemas to be dumped");
			timerEnd("schemas");
			debug("begin dumping schemas");
			Collection<Schema> batch;
			while (! (batch = cachingSchemaFactory.nextBatch(con, batchSize)).isEmpty()) {
				viewFactory = new View.CachingViewFactory(cachingSchemaFactory);
				tableFactory = new Table.CachingTableFactory(cachingSchemaFactory);
				sequenceFactory = new Sequence.CachingSequenceFactory(cachingSchemaFactory);
				indexFactory = new Index.CachingIndexFactory(cachingSchemaFactory, (Table.CachingTableFactory)tableFactory);
				constraintFactory = new Constraint.CachingConstraintFactory(cachingSchemaFactory, (Table.CachingTableFactory)tableFactory);
				for (Schema schema : batch) {
					dump(schema, dataFilter, con, zos);
				}
				// start a fresh connection for each batch
				con.close();
				con = DriverManager.getConnection(jdbcUrl);
				con.setReadOnly(true);
				con.setAutoCommit(true);
			}
			printTimings();
		} catch (SQLException e) {
			throw new RuntimeException(e.getMessage(), e);
		} catch (IOException e) {
			throw new RuntimeException(e.getMessage(), e);
		} finally {
			try {
				if (con != null) con.close();
			} catch (SQLException ignore) {}
			try {
				if (zos != null) zos.close();
			} catch (IOException e) {
				throw new RuntimeException(e.getMessage(), e);
			}
		}
		debug("finished full dump at " + new Date());
	}

	public void dump(Iterable<String> schemaNames, DataFilter dataFilter) {
		Connection con = null;
		try {
			con = DriverManager.getConnection(jdbcUrl);
			con.setReadOnly(true);
			con.setAutoCommit(false);
			// serializable isolation gives a consistent snapshot across all schemas
			con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
			dump(schemaNames, dataFilter, con);
		} catch (SQLException e) {
			throw new RuntimeException(e.getMessage(), e);
		} finally {
			try {
				if (con != null) con.close();
			} catch (SQLException ignore) {}
		}
	}

	public void dump(Iterable<String> schemaNames, DataFilter dataFilter, Connection con) {
		ZipOutputStream zos = null;
		try {
			zos = getZipOutputStream();
			timerStart("schemas");
			List<Schema> schemas = new ArrayList<Schema>();
			for (String schemaName : schemaNames) {
				Schema schema = schemaFactory.getDbBackupObject(con, schemaName, null);
				if (schema == null)
					throw new RuntimeException("schema " + schemaName + " not found in database");
				schemas.add(schema);
			}
			setTotalCount(schemas.size());
			dumpSchemasSql(schemas, dataFilter, con, zos);
			timerEnd("schemas");
			for (Schema schema : schemas) {
				dump(schema, dataFilter, con, zos);
			}
			printTimings();
		} catch (SQLException e) {
			throw new RuntimeException(e.getMessage(), e);
		} catch (IOException e) {
			throw new RuntimeException(e.getMessage(), e);
		} finally {
			try {
				if (zos != null) zos.close();
			} catch (IOException e) {
				throw new RuntimeException(e.getMessage(), e);
			}
		}
	}

	private ZipOutputStream getZipOutputStream() throws IOException {
		if (file != null) {
			if (file.length() > 0) {
				throw new RuntimeException("destination file is not empty");
			}
			return new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(file)));
		} else {
			return new ZipOutputStream(System.out);
		}
	}

	private void dumpSchemasSql(Iterable<Schema> schemas, DataFilter dataFilter, Connection con, ZipOutputStream zos) {
		try {
			zos.putNextEntry(new ZipEntry(zipRoot));
			putSqlZipEntry(zos, zipRoot + "schemas.sql", schemas, dataFilter);
			zos.putNextEntry(new ZipEntry(zipRoot + "schemas/"));
		} catch (IOException e) {
			throw new RuntimeException(e.getMessage(), e);
		}
	}

	private void dump(Schema schema, DataFilter dataFilter, Connection con, ZipOutputStream zos) {
		try {
			String schemaRoot = zipRoot + "schemas/" + schema.getName() + "/";
			zos.putNextEntry(new ZipEntry(schemaRoot));

			timerStart("sequences");
			Iterable<Sequence> sequences = sequenceFactory.getDbBackupObjects(con, schema);
			putSqlZipEntry(zos, schemaRoot + "sequences.sql", sequences, dataFilter);
			timerEnd("sequences");

			Iterable<Table> tables = tableFactory.getDbBackupObjects(con, schema);
			putSqlZipEntry(zos, schemaRoot + "tables.sql", tables, dataFilter);

			timerStart("table data");
			zos.putNextEntry(new ZipEntry(schemaRoot + "tables/"));
			for (Table table : tables) {
				if (dataFilter.dumpData(schema.getName(), table.getName())) {
					zos.putNextEntry(new ZipEntry(schemaRoot + "tables/" + table.getName()));
					table.dump(con, zos);
				}
			}
			timerEnd("table data");

			timerStart("views");
			Iterable<View> views = viewFactory.getDbBackupObjects(con, schema);
			putSqlZipEntry(zos, schemaRoot + "views.sql", views, dataFilter);
			timerEnd("views");

			timerStart("indexes");
			Iterable<Index> indexes = indexFactory.getDbBackupObjects(con, schema);
			putSqlZipEntry(zos, schemaRoot + "indexes.sql", indexes, dataFilter);
			timerEnd("indexes");

			timerStart("constraints");
			Iterable<Constraint> constraints = constraintFactory.getDbBackupObjects(con, schema);
			putSqlZipEntry(zos, schemaRoot + "constraints.sql", constraints, dataFilter);
			timerEnd("constraints");

			processedSchema();

		} catch (SQLException e) {
			throw new RuntimeException("error dumping schema " + schema.getName(), e);
		} catch (IOException e) {
			throw new RuntimeException("error dumping schema " + schema.getName(), e);
		}
	}

	private void putSqlZipEntry(ZipOutputStream zos, String name,
			Iterable<? extends DbBackupObject> dbBackupObjects, DataFilter dataFilter) throws IOException {
		zos.putNextEntry(new ZipEntry(name));
		for (DbBackupObject o : dbBackupObjects) {
			zos.write(o.getSql(dataFilter).getBytes()); // note: platform default charset
		}
	}


	public List<String> schemasInBackup() {
		ZipFile zipFile = null;
		try {
			zipFile = new ZipFile(file);
			return new ArrayList<String>(getSchemaTables(zipFile).keySet());
		} catch (IOException e) {
			throw new RuntimeException(e.getMessage(), e);
		} finally {
			try {
				if (zipFile != null) zipFile.close();
			} catch (IOException ignore) {}
		}
	}

	public void restoreSchema(String schema) {
		restoreSchemaTo(schema, schema);
	}

	public void restoreSchemaTo(String schema, String toSchema) {
		Connection con = null;
		try {
			con = DriverManager.getConnection(jdbcUrl);
			con.setAutoCommit(false);
			restoreSchemaTo(schema, toSchema, con);
			con.commit();
		} catch (Exception e) {
			try {
				if (con != null) con.rollback();
			} catch (SQLException ignore) {}
			throw new RuntimeException(e.getMessage(), e);
		} finally {
			try {
				if (con != null) con.close();
			} catch (SQLException ignore) {}
		}
	}

	public void restoreSchemaTo(String schema, String toSchema, Connection con) {
		ZipFile zipFile = null;
		try {
			zipFile = new ZipFile(file);
			restoreSchema(schema, toSchema, toSchema, zipFile, con);
			printTimings();
		} catch (IOException e) {
			throw new RuntimeException(e.getMessage(), e);
		} finally {
			try {
				if (zipFile != null) zipFile.close();
			} catch (IOException ignore) {}
		}
	}

	public void restoreAll() {
		debug("starting full restore at " + new Date());
		ZipFile zipFile = null;
		Connection con = null;
		try {
			con = DriverManager.getConnection(jdbcUrl);
			con.setAutoCommit(false);
			zipFile = new ZipFile(file);

			timerStart("schemas");
			restoreSchemasSql(zipFile, con);
			List<String> schemas = schemasInBackup();
			setTotalCount(schemas.size());
			timerEnd("schemas");

			int count = 0;
			for (String schemaName : schemas) {
				restoreSchema(schemaName, schemaName, schemaName, zipFile, con);
				if (++count % 100 == 1) con.commit(); // commit after the first schema, then every 100
			}

			con.commit();
			printTimings();
		} catch (Exception e) {
			try {
				if (con != null) con.rollback();
			} catch (SQLException ignore) {}
			throw new RuntimeException(e.getMessage(), e);
		} finally {
			try {
				if (con != null) con.close();
			} catch (SQLException ignore) {}
			try {
				if (zipFile != null) zipFile.close();
			} catch (IOException ignore) {}
		}
		debug("finished full restore at " + new Date());
	}

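	// Note on ownership: restoreSchema switches the session to the target
	// owner's role via SET ROLE, so the connecting user must be a superuser
	// or a member of each role involved for the restore to succeed.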
	private void restoreSchema(String fromSchemaName, String toSchemaName, String toOwner, ZipFile zipFile, Connection con) {
		try {
			timerStart("schemas");
			boolean isNewSchema = !toSchemaName.equals(fromSchemaName);
			Schema toSchema = schemaFactory.getDbBackupObject(con, toSchemaName, null);
			if (toSchema == null)
				toSchema = Schema.createSchema(con, toSchemaName, toOwner, schemaFactory);
			else
				toOwner = toSchema.getOwner(); // preserve existing owner
			setRole(con, toOwner);
			setSearchPath(con, toSchema);
			timerEnd("schemas");

			String schemaRoot = zipRoot + "schemas/" + fromSchemaName + "/";

			timerStart("sequences");
			ZipEntry sequencesSql = zipFile.getEntry(schemaRoot + "sequences.sql");
			execSqlZipEntry(zipFile, con, sequencesSql, isNewSchema);
			timerEnd("sequences");

			timerStart("tables");
			ZipEntry tablesSql = zipFile.getEntry(schemaRoot + "tables.sql");
			execSqlZipEntry(zipFile, con, tablesSql, isNewSchema);
			timerEnd("tables");

			timerStart("table data");
			Set<ZipEntry> tableEntries = getSchemaTables(zipFile).get(fromSchemaName);
			for (ZipEntry tableEntry : tableEntries) {
				String tableName = parseTable(tableEntry.getName());
				Table table = tableFactory.getDbBackupObject(con, tableName, toSchema);
				if (!table.getOwner().equals(toOwner) && !isNewSchema) {
					setRole(con, table.getOwner());
				}
				table.restore(zipFile.getInputStream(tableEntry), con);
				if (!table.getOwner().equals(toOwner) && !isNewSchema) {
					setRole(con, toOwner);
				}
			}
			timerEnd("table data");

			timerStart("views");
			ZipEntry viewsSql = zipFile.getEntry(schemaRoot + "views.sql");
			execSqlZipEntry(zipFile, con, viewsSql, isNewSchema);
			timerEnd("views");

			timerStart("indexes");
			ZipEntry indexesSql = zipFile.getEntry(schemaRoot + "indexes.sql");
			execSqlZipEntry(zipFile, con, indexesSql, isNewSchema);
			timerEnd("indexes");

			timerStart("constraints");
			ZipEntry constraintsSql = zipFile.getEntry(schemaRoot + "constraints.sql");
			execSqlZipEntry(zipFile, con, constraintsSql, isNewSchema);
			timerEnd("constraints");

			resetSearchPath(con);
			resetRole(con);
			processedSchema();
		} catch (Exception e) {
			throw new RuntimeException(
					"error restoring " + fromSchemaName + " to " + toSchemaName, e);
		}
	}

	private void restoreSchemasSql(ZipFile zipFile, Connection con) {
		try {
			ZipEntry schemasSql = zipFile.getEntry(zipRoot + "schemas.sql");
			if (schemasSql != null) execSqlZipEntry(zipFile, con, schemasSql, false);
		} catch (SQLException e) {
			throw new RuntimeException(e.getMessage(), e);
		} catch (IOException e) {
			throw new RuntimeException(e.getMessage(), e);
		}
	}

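	// The .sql entries are assumed to contain exactly one complete SQL
	// statement per line; that is what lets execSqlZipEntry execute an
	// archive entry line by line with readLine().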
	private void execSqlZipEntry(ZipFile zipFile, Connection con, ZipEntry zipEntry, boolean isNewSchema) throws IOException, SQLException {
		BufferedReader reader = null;
		try {
			reader = new BufferedReader(new InputStreamReader(zipFile.getInputStream(zipEntry)));
			for (String sql = reader.readLine(); sql != null; sql = reader.readLine()) {
				if (isNewSchema) { // skip any role and ownership changes if restoring to a new schema
					if (sql.startsWith("SET ROLE ") || (sql.startsWith("ALTER ") && sql.contains(" OWNER TO "))) {
						continue;
					}
				}
				PreparedStatement stmt = null;
				try {
					stmt = con.prepareStatement(sql);
					if (sql.startsWith("SELECT ")) { // e.g. sequence setval() calls
						stmt.executeQuery();
					} else {
						stmt.executeUpdate();
					}
				} catch (SQLException e) {
					throw new RuntimeException("error executing sql: " + sql, e);
				} finally {
					if (stmt != null) stmt.close();
				}
			}
		} finally {
			if (reader != null) reader.close();
		}
	}


	private Map<String,Set<ZipEntry>> schemaTables = null;

	private Map<String,Set<ZipEntry>> getSchemaTables(ZipFile zipFile) {
		if (schemaTables == null) {
			schemaTables = new HashMap<String,Set<ZipEntry>>();
			Enumeration<? extends ZipEntry> entries = zipFile.entries();
			while (entries.hasMoreElements()) {
				ZipEntry entry = entries.nextElement();
				String schema = parseSchema(entry.getName());
				if (schema == null) continue;
				Set<ZipEntry> tables = schemaTables.get(schema);
				if (tables == null) {
					tables = new HashSet<ZipEntry>();
					schemaTables.put(schema, tables);
				}
				if (isTable(entry.getName())) {
					tables.add(entry);
				}
			}
		}
		return schemaTables;
	}

	private void setRole(Connection con, String role) throws SQLException {
		PreparedStatement stmt = null;
		try {
			// SET ROLE is a utility command and cannot take a bind parameter,
			// so the role name (which comes from the backup itself) is concatenated
			stmt = con.prepareStatement("SET ROLE " + role);
			stmt.executeUpdate();
		} finally {
			if (stmt != null) stmt.close();
		}
	}

	private void setSearchPath(Connection con, Schema schema) throws SQLException {
		PreparedStatement stmt = null;
		try {
			// same limitation as in setRole above: no bind parameter possible
			stmt = con.prepareStatement("SET SEARCH_PATH = " + schema.getName());
			stmt.executeUpdate();
		} finally {
			if (stmt != null) stmt.close();
		}
	}

	private void resetRole(Connection con) throws SQLException {
		PreparedStatement stmt = null;
		try {
			stmt = con.prepareStatement("RESET ROLE");
			stmt.executeUpdate();
		} finally {
			if (stmt != null) stmt.close();
		}
	}

	private void resetSearchPath(Connection con) throws SQLException {
		PreparedStatement stmt = null;
		try {
			stmt = con.prepareStatement("RESET SEARCH_PATH");
			stmt.executeUpdate();
		} finally {
			if (stmt != null) stmt.close();
		}
	}

	private static boolean isTable(String name) {
		int i = name.indexOf("/tables/");
		return i > -1 && i < name.length() - "/tables/".length();
	}

	private static String parseTable(String name) {
		int from = name.indexOf("/tables/") + "/tables/".length();
		return name.substring(from);
	}

	private static String parseSchema(String name) {
		int i = name.indexOf("/schemas/");
		if (i < 0) return null;
		int from = i + "/schemas/".length();
		int to = name.indexOf('/', from);
		if (to < 0) return null;
		return name.substring(from, to);
	}
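
	// For example, an entry named "pg_backup/schemas/public/tables/users"
	// parses to schema "public" and table "users"; the directory entry
	// "pg_backup/schemas/public/tables/" itself is not counted as a table.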


	private static class Timing {
		private final Map<String,Long> timerMap = new HashMap<String,Long>();
		private final long startTime = System.currentTimeMillis();
		private final PrintStream ps;
		private int totalCount = 1;
		private int processedCount;
		private long time = -1;

		private Timing(PrintStream ps) {
			this.ps = ps;
		}

		void start(String step) {
			if (time != -1) throw new RuntimeException("timer already started");
			time = System.currentTimeMillis();
		}

		void end(String step) {
			if (time == -1) throw new RuntimeException("timer not started");
			long thisTime = System.currentTimeMillis() - time;
			Long oldTotal = timerMap.get(step);
			if (oldTotal != null) thisTime += oldTotal.longValue();
			timerMap.put(step, Long.valueOf(thisTime));
			time = -1;
		}

		void processedSchema() {
			if (++processedCount % 100 == 1) print();
		}

		void print() {
			long totalTime = System.currentTimeMillis() - startTime;
			long remaining = totalTime;
			for (Map.Entry<String,Long> entry : timerMap.entrySet()) {
				long t = entry.getValue();
				remaining = remaining - t;
				ps.print(entry.getKey() + ": \t");
				ps.println(t/1000 + " s \t" + t*100/totalTime + " %");
			}
			ps.println("others: \t" + remaining/1000 + " s \t" + remaining*100/totalTime + " %");
			ps.println("total time: \t" + totalTime/1000 + " s");
			ps.println("processed: \t" + processedCount + " out of " + totalCount +
					" schemas \t" + processedCount*100/totalCount + "%");
			ps.println();
		}

		void print(String msg) {
			ps.println(msg);
		}

	}

	// no-op by default; replaced per thread by setTimingOutput()
	static final ThreadLocal<Timing> timing = new ThreadLocal<Timing>() {
		@Override
		protected Timing initialValue() {
			return new Timing(null) {
				@Override void start(String step) {}
				@Override void end(String step) {}
				@Override void processedSchema() {}
				@Override void print() {}
				@Override void print(String msg) {}
			};
		}
	};

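	/**
	 * Enables timing and progress output for the current thread; without
	 * this call, all the timer helpers below are no-ops. For example:
	 * <pre>
	 *   ZipBackup.setTimingOutput(System.err);
	 * </pre>
	 */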
	public static void setTimingOutput(PrintStream ps) {
		timing.set(new Timing(ps));
	}

	static void timerStart(String step) {
		timing.get().start(step);
	}

	static void timerEnd(String step) {
		timing.get().end(step);
	}

	static void setTotalCount(int n) {
		timing.get().totalCount = n;
	}

	static void processedSchema() {
		timing.get().processedSchema();
	}

	static void printTimings() {
		timing.get().print();
	}

	static void debug(String msg) {
		timing.get().print("at " + (System.currentTimeMillis() - timing.get().startTime)/1000 + " s:");
		timing.get().print(msg);
	}

	static String buildJdbcUrl(Map<String,String> params) {
		StringBuilder buf = new StringBuilder();
		buf.append("jdbc:postgresql://");
		String hostname = params.get("hostname");
		if (hostname == null) hostname = "localhost";
		buf.append(hostname);
		String port = params.get("port");
		if (port == null) port = "5432";
		buf.append(":").append(port);
		buf.append("/");
		String username = params.get("user");
		if (username == null) username = "postgres";
		String database = params.get("database");
		if (database == null) database = username;
		buf.append(database);
		buf.append("?user=");
		buf.append(username);
		String password = params.get("password");
		if (password != null) buf.append("&password=").append(password);
		else {
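			// fall back to ~/.pgpass; each line has the standard format
			// hostname:port:database:username:password, where "*" matches
			// any value (escaped colons are not handled here)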
			File pgpass = new File(System.getProperty("user.home"), ".pgpass");
			if (pgpass.exists()) {
				BufferedReader r = null;
				try {
					r = new BufferedReader(new FileReader(pgpass));
					String line;
					while ((line = r.readLine()) != null) {
						if (line.startsWith("#")) continue;
						String[] entries = line.split(":");
						if (entries.length != 5) throw new RuntimeException(
								"unsupported pgpass file format, better specify password on command line");
						if (! (hostname.equals(entries[0]) || "*".equals(entries[0])) ) continue;
						if (! (port.equals(entries[1]) || "*".equals(entries[1])) ) continue;
						if (! (database.equals(entries[2]) || "*".equals(entries[2])) ) continue;
						if (! (username.equals(entries[3]) || "*".equals(entries[3])) ) continue;
						buf.append("&password=").append(entries[4]);
						break;
					}
				} catch (IOException e) {
					throw new RuntimeException("failed to read the pgpass file", e);
				} finally {
					try {
						if (r != null) r.close();
					} catch (IOException ignore) {}
				}
			}
		}
		return buf.toString();
	}



	// new - fschmidt

	private Set<String> getEntry(ZipFile zipFile, String entryName)
			throws IOException, SQLException
	{
		Set<String> set = new HashSet<String>();
		ZipEntry zipEntry = zipFile.getEntry(entryName);
		if (zipEntry == null) return set; // treat a missing entry as empty
		BufferedReader reader = new BufferedReader(new InputStreamReader(zipFile.getInputStream(zipEntry)));
		try {
			for (String sql = reader.readLine(); sql != null; sql = reader.readLine()) {
				// skip any role and ownership changes, as when restoring to a new schema
				if (sql.startsWith("SET ROLE ") || (sql.startsWith("ALTER ") && sql.contains(" OWNER TO "))) {
					continue;
				}
				set.add(sql);
			}
		} finally {
			reader.close();
		}
		return set;
	}

	private boolean matches(ZipFile zipFile1, String schemaRoot1, ZipFile zipFile2, String schemaRoot2, String what)
			throws IOException, SQLException
	{
		Set<String> entry1 = getEntry(zipFile1, schemaRoot1 + what);
		Set<String> entry2 = getEntry(zipFile2, schemaRoot2 + what);
		return entry1.equals(entry2);
	}

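	/**
	 * Compares the single schema in this backup file against a live schema
	 * by dumping the latter (definitions only, no data) to a temporary file
	 * and comparing the .sql entries as unordered sets of statements.
	 * A sketch of typical use, with an illustrative file name:
	 *
	 * <pre>
	 *   ZipBackup backup = new ZipBackup(new File("mydb.zip"), jdbcUrl);
	 *   boolean unchanged = backup.compareTo("public");
	 * </pre>
	 */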
	public boolean compareTo(String schema) {
		try {
			List<String> schemasInBackup = schemasInBackup();
			if (schemasInBackup.size() != 1)
				throw new RuntimeException("must have 1 schema");
			String backupSchema = schemasInBackup.get(0);

			File tmpFile = File.createTempFile("jdbcpgbackup", null);
			tmpFile.deleteOnExit();
			ZipBackup backup = new ZipBackup(tmpFile, jdbcUrl);
			backup.dump(Collections.singleton(schema), DataFilter.NO_DATA);

			ZipFile zipFile1 = null;
			ZipFile zipFile2 = null;
			try {
				zipFile1 = new ZipFile(file);
				zipFile2 = new ZipFile(tmpFile);
				String schemaRoot1 = zipRoot + "schemas/" + backupSchema + "/";
				String schemaRoot2 = zipRoot + "schemas/" + schema + "/";
				return matches(zipFile1, schemaRoot1, zipFile2, schemaRoot2, "sequences.sql")
						&& matches(zipFile1, schemaRoot1, zipFile2, schemaRoot2, "tables.sql")
						&& matches(zipFile1, schemaRoot1, zipFile2, schemaRoot2, "views.sql")
						&& matches(zipFile1, schemaRoot1, zipFile2, schemaRoot2, "indexes.sql")
						&& matches(zipFile1, schemaRoot1, zipFile2, schemaRoot2, "constraints.sql");
			} finally {
				try {
					if (zipFile1 != null) zipFile1.close();
				} catch (IOException ignore) {}
				try {
					if (zipFile2 != null) zipFile2.close();
				} catch (IOException ignore) {}
				tmpFile.delete();
			}
		} catch (IOException e) {
			throw new RuntimeException(e);
		} catch (SQLException e) {
			throw new RuntimeException(e);
		}
	}
}