comparison src/jdbcpgbackup/Table.java @ 0:7ecd1a4ef557
add content

author    Franklin Schmidt <fschmidt@gmail.com>
date      Thu, 21 Mar 2019 19:15:52 -0600
parents
children
comparison: -1:000000000000 to 0:7ecd1a4ef557
/* Copyright (c) 2012 Tomislav Gountchev <tomi@gountchev.net> */

package jdbcpgbackup;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;

import org.postgresql.PGConnection;
import org.postgresql.copy.CopyManager;
import org.postgresql.core.BaseConnection;

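// One table in a schema: rebuilds its CREATE TABLE statement from column
// metadata and dumps/restores its rows using the PostgreSQL COPY protocol.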
final class Table extends DbBackupObject {

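    // Factory that loads the tables of one schema from pg_class; column
    // metadata is fetched with a separate query per table.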
    static class TableFactory implements DBOFactory<Table> {

        @Override
        public Iterable<Table> getDbBackupObjects(Connection con, Schema schema) throws SQLException {
            ZipBackup.timerStart("tables");
            List<Table> tables = new ArrayList<Table>();
            PreparedStatement stmt = null;
            try {
                stmt = con.prepareStatement("SELECT pg_get_userbyid(c.relowner) AS tableowner, " +
                        "c.relname AS tablename, c.oid AS table_oid " +
                        "FROM pg_class c " +
                        "WHERE c.relkind = 'r'::\"char\" AND c.relnamespace = ?");
                stmt.setInt(1, schema.getOid());
                ResultSet rs = stmt.executeQuery();
                while (rs.next()) {
                    Table table = new Table(rs.getString("tablename"), schema, rs.getString("tableowner"));
                    loadColumns(con, table, rs.getInt("table_oid"));
                    tables.add(table);
                }
                rs.close();
            } finally {
                if (stmt != null) stmt.close();
            }
            ZipBackup.timerEnd("tables");
            return tables;
        }

        // does not load columns
        @Override
        public Table getDbBackupObject(Connection con, String tableName, Schema schema) throws SQLException {
            return getDbBackupObject(con, tableName, schema, false);
        }

        public Table getDbBackupObject(Connection con, String tableName, Schema schema, boolean loadColumns) throws SQLException {
            Table table = null;
            PreparedStatement stmt = null;
            try {
                stmt = con.prepareStatement("SELECT pg_get_userbyid(c.relowner) AS tableowner, c.oid AS table_oid " +
                        "FROM pg_class c " +
                        "WHERE c.relkind = 'r'::\"char\" AND c.relnamespace = ? " +
                        "AND c.relname = ?");
                stmt.setInt(1, schema.getOid());
                stmt.setString(2, tableName);
                ResultSet rs = stmt.executeQuery();
                if (rs.next()) {
                    table = new Table(tableName, schema, rs.getString("tableowner"));
                    if (loadColumns) loadColumns(con, table, rs.getInt("table_oid"));
                } else {
                    throw new RuntimeException("no such table: " + tableName);
                }
                rs.close();
            } finally {
                if (stmt != null) stmt.close();
            }
            return table;
        }

        private void loadColumns(Connection con, Table table, int tableOid) throws SQLException {
            PreparedStatement stmt = null;
            try {
                stmt = con.prepareStatement(
                        "SELECT a.attname,a.atttypid," +
                        "a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) AS attnotnull,a.atttypmod," +
                        "row_number() OVER (PARTITION BY a.attrelid ORDER BY a.attnum) AS attnum, " +
                        "pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS adsrc,t.typtype " +
                        "FROM pg_catalog.pg_attribute a " +
                        "JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) " +
                        "LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) " +
                        "WHERE a.attnum > 0 AND NOT a.attisdropped " +
                        "AND a.attrelid = ? ");
                stmt.setInt(1, tableOid);
                ResultSet rs = stmt.executeQuery();
                while (rs.next()) {
                    table.columns.add(table.new Column((BaseConnection)con, rs));
                }
                rs.close();
            } finally {
                if (stmt != null) stmt.close();
            }
        }

    }

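    // Caching factory: loads every table in the database from pg_class in one
    // pass, keyed by oid, then bulk-loads all column metadata with a single query.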
    static class CachingTableFactory extends CachingDBOFactory<Table> {

        private final Map<Integer,Table> oidMap;

        protected CachingTableFactory(Schema.CachingSchemaFactory schemaFactory) {
            super(schemaFactory);
            oidMap = new HashMap<Integer,Table>();
        }

        @Override
        protected PreparedStatement getAllStatement(Connection con) throws SQLException {
            return con.prepareStatement("SELECT c.relnamespace AS schema_oid, c.relname AS tablename, " +
                    "pg_get_userbyid(c.relowner) AS tableowner, c.oid " +
                    "FROM pg_class c " +
                    "WHERE c.relkind = 'r'::\"char\"");
        }

        @Override
        protected Table newDbBackupObject(Connection con, ResultSet rs, Schema schema) throws SQLException {
            Table table = new Table(rs.getString("tablename"), schema, rs.getString("tableowner"));
            oidMap.put(rs.getInt("oid"), table);
            return table;
        }

        @Override
        protected void loadMap(Connection con) throws SQLException {
            ZipBackup.timerStart("tables");
            super.loadMap(con);
            ZipBackup.timerEnd("tables");
            loadColumns(con);
        }

        Table getTable(int table_oid) {
            return oidMap.get(table_oid);
        }

        private void loadColumns(Connection con) throws SQLException {
            ZipBackup.timerStart("load columns");
            ZipBackup.debug("begin loading columns...");
            PreparedStatement stmt = null;
            try {
                stmt = con.prepareStatement(
                        "SELECT a.attrelid AS table_oid, a.attname, a.atttypid," +
                        "a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) AS attnotnull, a.atttypmod, " +
                        "row_number() OVER (PARTITION BY a.attrelid ORDER BY a.attnum) AS attnum, " +
                        "pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS adsrc, t.typtype " +
                        "FROM pg_catalog.pg_attribute a " +
                        "JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) " +
                        "LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) " +
                        "WHERE a.attnum > 0 AND NOT a.attisdropped ");
                int count = 0;
                ResultSet rs = stmt.executeQuery();
                while (rs.next()) {
                    int oid = rs.getInt("table_oid");
                    Table table = oidMap.get(oid);
                    if (table != null) {
                        table.columns.add(table.new Column((BaseConnection)con, rs));
                        if (++count%100000 == 1) ZipBackup.debug("loaded " + count + " columns");
                    }
                }
                rs.close();
            } finally {
                if (stmt != null) stmt.close();
            }
            ZipBackup.debug("end loading columns");
            ZipBackup.timerEnd("load columns");
        }

    }


    /*
    private static void loadSequences(Connection con, Schema schema, Map<String,Table> tables) throws SQLException {
        ZipBackup.timerStart("load sequences");
        PreparedStatement stmt = con.prepareStatement(
                "SELECT c.relname AS sequencename, d.refobjsubid AS columnid, p.relname AS tablename " +
                "FROM pg_class c, pg_depend d, pg_class p " +
                "WHERE d.refobjid = p.oid " +
                "AND p.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = ?) " +
                "AND c.oid = d.objid AND c.relkind = 'S'");
        stmt.setString(1, schema.getName());
        ResultSet rs = stmt.executeQuery();
        while (rs.next()) {
            tables.get(rs.getString("tablename")).columns.get(rs.getInt("columnid")).setSequenceName(rs.getString("sequencename"));
        }
        rs.close();
        stmt.close();
        ZipBackup.timerEnd("load sequences");
    }
    */

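    // Columns kept sorted by ordinal position so the generated CREATE TABLE
    // lists them in their original order.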
    private final Set<Column> columns = new TreeSet<Column>(
            new Comparator<Column>() {
                public int compare(Column a, Column b) {
                    return a.position - b.position;
                };
            });

    private Table(String name, Schema schema, String owner) {
        super(name, schema, owner);
    }

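    // Emits "CREATE TABLE name (column definitions) ;" followed by an
    // "ALTER SEQUENCE ... OWNED BY ..." statement for each sequence-backed column.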
    @Override
    protected StringBuilder appendCreateSql(StringBuilder buf) {
        buf.append("CREATE TABLE ").append(getName());
        buf.append(" (");
        for (Column column : columns) {
            column.appendSql(buf);
            buf.append(",");
        }
        buf.deleteCharAt(buf.length()-1);
        buf.append(")");
        buf.append(" ;\n");
        for (Column column : columns) {
            column.appendSequenceSql(buf);
        }
        return buf;
    }

    void dump(Connection con, OutputStream os) throws SQLException, IOException {
        CopyManager copyManager = ((PGConnection)con).getCopyAPI();
        copyManager.copyOut("COPY " + getFullname() + " TO STDOUT BINARY", os);
    }

    void restore(InputStream is, Connection con) throws SQLException, IOException {
        CopyManager copyManager = ((PGConnection)con).getCopyAPI();
        copyManager.copyIn("COPY " + getFullname() + " FROM STDIN BINARY", is);
    }

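    // Type names whose size or precision modifier has to be written out
    // explicitly in the generated column definition.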
    private static final Set<String> appendSizeTo = new HashSet<String>(
            Arrays.asList("bit", "varbit", "bit varying", "bpchar", "char", "varchar", "character", "character varying"));

    private static final Set<String> appendPrecisionTo = new HashSet<String>(
            Arrays.asList("time", "timestamp", "timetz", "timestamptz"));

    /*
    private static final Map<String,String> aliasMap = new HashMap<String,String>();
    static {
        aliasMap.put("serial","integer");
        aliasMap.put("serial4","integer");
        aliasMap.put("serial8","bigint");
        aliasMap.put("bigserial","bigint");
    }

    private static String getAlias(String type) {
        String alias = aliasMap.get(type);
        return alias != null ? alias : type;
    }
    */

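    // Metadata for a single column (type, size, default, nullability, owning
    // sequence), resolved through the JDBC driver's type info and pg_attribute.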
    private class Column {

        private final String name;
        private final String typeName;
        private /*final*/ int columnSize;
        private final int decimalDigits;
        private final int nullable;
        private final String defaultValue;
        private final boolean isAutoincrement;
        private final String sequenceName;
        private final int position;

        private Column(BaseConnection con, ResultSet rs) throws SQLException {

            int typeOid = (int)rs.getLong("atttypid");
            int typeMod = rs.getInt("atttypmod");

            position = rs.getInt("attnum");
            name = rs.getString("attname");
            typeName = con.getTypeInfo().getPGType(typeOid);
            decimalDigits = con.getTypeInfo().getScale(typeOid, typeMod);
            columnSize = con.getTypeInfo().getPrecision(typeOid, typeMod);
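            // Zero precision means no declared precision, so fall back to the display
            // size; an unlimited display size (Integer.MAX_VALUE) becomes 0 so that
            // appendSql emits no size modifier at all.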
            if (columnSize == 0) {
                columnSize = con.getTypeInfo().getDisplaySize(typeOid, typeMod);
            }
            if (columnSize == Integer.MAX_VALUE) {
                columnSize = 0;
            }
            nullable = rs.getBoolean("attnotnull") ? java.sql.DatabaseMetaData.columnNoNulls : java.sql.DatabaseMetaData.columnNullable;
            String columnDef = rs.getString("adsrc");
            if (columnDef != null) {
                defaultValue = columnDef.replace("nextval('" + schema.getName() + ".", "nextval('"); // remove schema name
                isAutoincrement = columnDef.indexOf("nextval(") != -1;
            } else {
                defaultValue = null;
                isAutoincrement = false;
            }
            if (isAutoincrement) {
                PreparedStatement stmt = null;
                try {
                    stmt = con.prepareStatement(
                            "SELECT pg_get_serial_sequence( ? , ? ) AS sequencename");
                    stmt.setString(1, getFullname());
                    stmt.setString(2, name);
                    ResultSet rs2 = stmt.executeQuery();
                    if (rs2.next() && rs2.getString("sequencename") != null) {
                        sequenceName = rs2.getString("sequencename").replace(schema.getName() + ".", "");
                    } else {
                        sequenceName = null;
                    }
                    rs2.close();
                } finally {
                    if (stmt != null) stmt.close();
                }
            } else sequenceName = null;
        }

        private StringBuilder appendSql(StringBuilder buf) {
            buf.append(name).append(" ");
            buf.append(typeName);
            if (appendSizeTo.contains(typeName) && columnSize > 0) {
                buf.append("(").append(columnSize).append(")");
            } else if (appendPrecisionTo.contains(typeName) || typeName.startsWith("interval")) {
                buf.append("(").append(decimalDigits).append(")");
            } else if ("numeric".equals(typeName) || "decimal".equals(typeName)) {
                buf.append("(").append(columnSize).append(",").append(decimalDigits).append(")");
            }
            if (defaultValue != null) {
                buf.append(" DEFAULT ").append(defaultValue);
            }
            if (nullable == java.sql.DatabaseMetaData.columnNoNulls) {
                buf.append(" NOT NULL");
            }
            return buf;
        }

        private StringBuilder appendSequenceSql(StringBuilder buf) {
            if (sequenceName == null) return buf;
            buf.append("ALTER SEQUENCE ");
            buf.append(sequenceName);
            buf.append(" OWNED BY ");
            buf.append(getName());
            buf.append(".").append(name);
            buf.append(" ;\n");
            return buf;
        }

    }

}