001/*
002 * (C) Copyright 2006-2016 Nuxeo SA (http://nuxeo.com/) and others.
003 *
004 * Licensed under the Apache License, Version 2.0 (the "License");
005 * you may not use this file except in compliance with the License.
006 * You may obtain a copy of the License at
007 *
008 *     http://www.apache.org/licenses/LICENSE-2.0
009 *
010 * Unless required by applicable law or agreed to in writing, software
011 * distributed under the License is distributed on an "AS IS" BASIS,
012 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
013 * See the License for the specific language governing permissions and
014 * limitations under the License.
015 *
016 * Contributors:
017 *     Florent Guillaume
018 */
019package org.nuxeo.ecm.core.storage.sql.jdbc;
020
021import java.io.Serializable;
022import java.sql.Array;
023import java.sql.BatchUpdateException;
024import java.sql.CallableStatement;
025import java.sql.PreparedStatement;
026import java.sql.ResultSet;
027import java.sql.SQLException;
028import java.sql.Types;
029import java.util.ArrayList;
030import java.util.Arrays;
031import java.util.Calendar;
032import java.util.Collection;
033import java.util.Collections;
034import java.util.HashMap;
035import java.util.HashSet;
036import java.util.Iterator;
037import java.util.LinkedHashMap;
038import java.util.LinkedList;
039import java.util.List;
041import java.util.Map;
042import java.util.Map.Entry;
043import java.util.Set;
044import java.util.stream.Collectors;
045
046import javax.transaction.xa.XAException;
047import javax.transaction.xa.Xid;
048
050import org.nuxeo.ecm.core.api.ConcurrentUpdateException;
051import org.nuxeo.ecm.core.api.NuxeoException;
052import org.nuxeo.ecm.core.api.model.Delta;
053import org.nuxeo.ecm.core.storage.sql.ClusterInvalidator;
054import org.nuxeo.ecm.core.storage.sql.Invalidations;
055import org.nuxeo.ecm.core.storage.sql.InvalidationsPropagator;
056import org.nuxeo.ecm.core.storage.sql.Model;
057import org.nuxeo.ecm.core.storage.sql.PropertyType;
058import org.nuxeo.ecm.core.storage.sql.Row;
059import org.nuxeo.ecm.core.storage.sql.RowId;
060import org.nuxeo.ecm.core.storage.sql.RowMapper;
061import org.nuxeo.ecm.core.storage.sql.SelectionType;
062import org.nuxeo.ecm.core.storage.sql.SimpleFragment;
063import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelect;
064import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelection;
065import org.nuxeo.ecm.core.storage.sql.jdbc.db.Column;
066import org.nuxeo.ecm.core.storage.sql.jdbc.db.Table;
067import org.nuxeo.ecm.core.storage.sql.jdbc.db.Update;
068import org.nuxeo.runtime.api.Framework;
069import org.nuxeo.runtime.services.config.ConfigurationService;
070
071/**
072 * A {@link JDBCRowMapper} maps {@link Row}s to and from a JDBC database.
073 */
074public class JDBCRowMapper extends JDBCConnection implements RowMapper {
075
076    public static final int UPDATE_BATCH_SIZE = 100; // also insert/delete
077
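    /** Maximum number of rows dumped in the debug logs when logging trees of children or descendants. */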
078    public static final int DEBUG_MAX_TREE = 50;
079
    /**
     * Property to determine whether collection appends delete all the existing rows and re-insert them, or are
     * optimized for append.
     */
081    public static final String COLLECTION_DELETE_BEFORE_APPEND_PROP = "org.nuxeo.vcs.list-delete-before-append";
082
    /**
     * Cluster invalidator, or {@code null} if this mapper does not participate in invalidation propagation (e.g., the
     * mappers used by the cluster invalidator itself or by the lock manager).
     */
087    private final ClusterInvalidator clusterInvalidator;
088
089    private final InvalidationsPropagator invalidationsPropagator;
090
091    private final boolean collectionDeleteBeforeAppend;
092
093    private final CollectionIO aclCollectionIO;
094
095    private final CollectionIO scalarCollectionIO;
096
097    public JDBCRowMapper(Model model, SQLInfo sqlInfo, ClusterInvalidator clusterInvalidator,
098            InvalidationsPropagator invalidationsPropagator) {
099        super(model, sqlInfo);
100        this.clusterInvalidator = clusterInvalidator;
101        this.invalidationsPropagator = invalidationsPropagator;
102        ConfigurationService configurationService = Framework.getService(ConfigurationService.class);
103        collectionDeleteBeforeAppend = configurationService.isBooleanPropertyTrue(COLLECTION_DELETE_BEFORE_APPEND_PROP);
104        aclCollectionIO = new ACLCollectionIO(collectionDeleteBeforeAppend);
105        scalarCollectionIO = new ScalarCollectionIO(collectionDeleteBeforeAppend);
106    }
107
108    @Override
109    public Invalidations receiveInvalidations() {
110        if (clusterInvalidator != null) {
111            Invalidations invalidations = clusterInvalidator.receiveInvalidations();
112            // send received invalidations to all mappers
113            if (invalidations != null && !invalidations.isEmpty()) {
114                invalidationsPropagator.propagateInvalidations(invalidations, null);
115            }
116            return invalidations;
117        } else {
118            return null;
119        }
120    }
121
123    @Override
124    public void sendInvalidations(Invalidations invalidations) {
125        if (clusterInvalidator != null) {
126            clusterInvalidator.sendInvalidations(invalidations);
127        }
128    }
129
130    @Override
131    public void clearCache() {
132        // no cache
133    }
134
135    @Override
136    public long getCacheSize() {
137        return 0;
138    }
139
140    @Override
141    public void rollback(Xid xid) throws XAException {
142        try {
143            xaresource.rollback(xid);
144        } catch (XAException e) {
145            logger.error("XA error on rollback: " + e);
146            throw e;
147        }
148    }
149
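    /**
     * Returns the {@link CollectionIO} to use for the given table: the ACL-specific one for the ACL table, the scalar
     * one otherwise.
     */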
150    protected CollectionIO getCollectionIO(String tableName) {
151        return tableName.equals(Model.ACL_TABLE_NAME) ? aclCollectionIO : scalarCollectionIO;
152    }
153
154    @Override
155    public Serializable generateNewId() {
156        try {
157            return dialect.getGeneratedId(connection);
158        } catch (SQLException e) {
159            throw new NuxeoException(e);
160        }
161    }
162
163    /*
164     * ----- RowIO -----
165     */
166
167    @Override
168    public List<? extends RowId> read(Collection<RowId> rowIds, boolean cacheOnly) {
169        List<RowId> res = new ArrayList<RowId>(rowIds.size());
170        if (cacheOnly) {
171            // return no data
172            for (RowId rowId : rowIds) {
173                res.add(new RowId(rowId));
174            }
175            return res;
176        }
177        // reorganize by table
178        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
179        for (RowId rowId : rowIds) {
180            Set<Serializable> ids = tableIds.get(rowId.tableName);
181            if (ids == null) {
182                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
183            }
184            ids.add(rowId.id);
185        }
186        // read on each table
187        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
188            String tableName = en.getKey();
189            Set<Serializable> ids = new HashSet<Serializable>(en.getValue());
190            int size = ids.size();
191            int chunkSize = sqlInfo.getMaximumArgsForIn();
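            // read in chunks to stay under the dialect's maximum number of parameters allowed in an IN clause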
192            List<Row> rows;
193            if (size > chunkSize) {
194                List<Serializable> idList = new ArrayList<Serializable>(ids);
195                rows = new ArrayList<Row>(size);
196                for (int start = 0; start < size; start += chunkSize) {
197                    int end = start + chunkSize;
198                    if (end > size) {
199                        end = size;
200                    }
201                    // needs to be Serializable -> copy
202                    List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
203                    List<Row> chunkRows;
204                    if (model.isCollectionFragment(tableName)) {
205                        chunkRows = readCollectionArrays(tableName, chunkIds);
206                    } else {
207                        chunkRows = readSimpleRows(tableName, chunkIds);
208                    }
209                    rows.addAll(chunkRows);
210                }
211            } else {
212                if (model.isCollectionFragment(tableName)) {
213                    rows = readCollectionArrays(tableName, ids);
214                } else {
215                    rows = readSimpleRows(tableName, ids);
216                }
217            }
218            // check we have all the ids (readSimpleRows may have some
219            // missing)
220            for (Row row : rows) {
221                res.add(row);
222                ids.remove(row.id);
223            }
224            // for the missing ids record an empty RowId
225            for (Serializable id : ids) {
226                res.add(new RowId(tableName, id));
227            }
228        }
229        return res;
230    }
231
232    /**
233     * Gets a list of rows for {@link SimpleFragment}s from the database, given the table name and the ids.
234     *
235     * @param tableName the table name
236     * @param ids the ids
237     * @return the list of rows, without the missing ones
238     */
239    protected List<Row> readSimpleRows(String tableName, Collection<Serializable> ids) {
240        if (ids.isEmpty()) {
241            return Collections.emptyList();
242        }
243        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size());
244        Map<String, Serializable> criteriaMap = Collections.singletonMap(Model.MAIN_KEY, (Serializable) ids);
245        return getSelectRows(tableName, select, criteriaMap, null, false);
246    }
247
    /**
     * Reads several collection rows, given a table name and the ids.
     *
     * @param tableName the table name
     * @param ids the ids
     * @return the list of rows; ids for which nothing is stored get a row holding an empty array
     */
254    protected List<Row> readCollectionArrays(String tableName, Collection<Serializable> ids) {
255        if (ids.isEmpty()) {
256            return Collections.emptyList();
257        }
        String[] orderBys = { Model.MAIN_KEY, Model.COLL_TABLE_POS_KEY }; // clusters results
260        Set<String> skipColumns = new HashSet<String>(Arrays.asList(Model.COLL_TABLE_POS_KEY));
261        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size(), orderBys, skipColumns);
262
263        String sql = select.sql;
264        if (logger.isLogEnabled()) {
265            logger.logSQL(sql, ids);
266        }
267        try (PreparedStatement ps = connection.prepareStatement(sql)) {
268            int i = 1;
269            for (Serializable id : ids) {
270                dialect.setId(ps, i++, id);
271            }
272            try (ResultSet rs = ps.executeQuery()) {
273                countExecute();
274
275                // get all values from result set, separate by ids
276                // the result set is ordered by id, pos
277                CollectionIO io = getCollectionIO(tableName);
278                PropertyType ftype = model.getCollectionFragmentType(tableName);
279                PropertyType type = ftype.getArrayBaseType();
280                Serializable curId = null;
281                List<Serializable> list = null;
282                Serializable[] returnId = new Serializable[1];
283                int[] returnPos = { -1 };
284                List<Row> res = new LinkedList<Row>();
285                Set<Serializable> remainingIds = new HashSet<Serializable>(ids);
286                while (rs.next()) {
287                    Serializable value = io.getCurrentFromResultSet(rs, select.whatColumns, model, returnId, returnPos);
288                    Serializable newId = returnId[0];
289                    if (newId != null && !newId.equals(curId)) {
290                        // flush old list
291                        if (list != null) {
292                            res.add(new Row(tableName, curId, type.collectionToArray(list)));
293                            remainingIds.remove(curId);
294                        }
295                        curId = newId;
296                        list = new ArrayList<Serializable>();
297                    }
298                    list.add(value);
299                }
300                if (curId != null && list != null) {
301                    // flush last list
302                    res.add(new Row(tableName, curId, type.collectionToArray(list)));
303                    remainingIds.remove(curId);
304                }
305
306                // fill empty ones
307                if (!remainingIds.isEmpty()) {
308                    Serializable[] emptyArray = ftype.getEmptyArray();
309                    for (Serializable id : remainingIds) {
310                        res.add(new Row(tableName, id, emptyArray));
311                    }
312                }
313                if (logger.isLogEnabled()) {
314                    for (Row row : res) {
315                        logger.log("  -> " + row);
316                    }
317                }
318                return res;
319            }
320        } catch (SQLException e) {
321            throw new NuxeoException("Could not select: " + sql, e);
322        }
323    }
324
    /**
     * Fetches the rows for a select with fixed criteria given as two maps (a criteriaMap whose values end up in the
     * returned rows, and a joinMap for other criteria).
     */
329    protected List<Row> getSelectRows(String tableName, SQLInfoSelect select, Map<String, Serializable> criteriaMap,
330            Map<String, Serializable> joinMap, boolean limitToOne) {
331        List<Row> list = new LinkedList<Row>();
332        if (select.whatColumns.isEmpty()) {
333            // happens when we fetch a fragment whose columns are all opaque
334            // check it's a by-id query
335            if (select.whereColumns.size() == 1 && select.whereColumns.get(0).getKey() == Model.MAIN_KEY
336                    && joinMap == null) {
337                Row row = new Row(tableName, criteriaMap);
338                if (select.opaqueColumns != null) {
339                    for (Column column : select.opaqueColumns) {
340                        row.putNew(column.getKey(), Row.OPAQUE);
341                    }
342                }
343                list.add(row);
344                return list;
345            }
346            // else do a useless select but the criteria are more complex and we
347            // can't shortcut
348        }
349        if (joinMap == null) {
350            joinMap = Collections.emptyMap();
351        }
352        try (PreparedStatement ps = connection.prepareStatement(select.sql)) {
353
354            /*
355             * Compute where part.
356             */
357            List<Serializable> debugValues = null;
358            if (logger.isLogEnabled()) {
359                debugValues = new LinkedList<Serializable>();
360            }
361            int i = 1;
362            for (Column column : select.whereColumns) {
363                String key = column.getKey();
364                Serializable v;
365                if (criteriaMap.containsKey(key)) {
366                    v = criteriaMap.get(key);
367                } else if (joinMap.containsKey(key)) {
368                    v = joinMap.get(key);
369                } else {
370                    throw new RuntimeException(key);
371                }
372                if (v == null) {
373                    throw new NuxeoException("Null value for key: " + key);
374                }
375                if (v instanceof Collection<?>) {
376                    // allow insert of several values, for the IN (...) case
377                    for (Object vv : (Collection<?>) v) {
378                        column.setToPreparedStatement(ps, i++, (Serializable) vv);
379                        if (debugValues != null) {
380                            debugValues.add((Serializable) vv);
381                        }
382                    }
383                } else {
384                    column.setToPreparedStatement(ps, i++, v);
385                    if (debugValues != null) {
386                        debugValues.add(v);
387                    }
388                }
389            }
390            if (debugValues != null) {
391                logger.logSQL(select.sql, debugValues);
392            }
393
394            /*
395             * Execute query.
396             */
397            try (ResultSet rs = ps.executeQuery()) {
398                countExecute();
399
400                /*
401                 * Construct the maps from the result set.
402                 */
403                while (rs.next()) {
404                    Row row = new Row(tableName, criteriaMap);
405                    i = 1;
406                    for (Column column : select.whatColumns) {
407                        row.put(column.getKey(), column.getFromResultSet(rs, i++));
408                    }
409                    if (select.opaqueColumns != null) {
410                        for (Column column : select.opaqueColumns) {
411                            row.putNew(column.getKey(), Row.OPAQUE);
412                        }
413                    }
414                    if (logger.isLogEnabled()) {
415                        logger.logResultSet(rs, select.whatColumns);
416                    }
417                    list.add(row);
418                    if (limitToOne) {
419                        return list;
420                    }
421                }
422            }
423            if (limitToOne) {
424                return Collections.emptyList();
425            }
426            return list;
427        } catch (SQLException e) {
428            checkConcurrentUpdate(e);
429            throw new NuxeoException("Could not select: " + select.sql, e);
430        }
431    }
432
433    @Override
434    public void write(RowBatch batch) {
435        if (!batch.creates.isEmpty()) {
436            writeCreates(batch.creates);
437        }
438        if (!batch.updates.isEmpty()) {
439            writeUpdates(batch.updates);
440        }
441        if (!batch.deletes.isEmpty()) {
442            writeDeletes(batch.deletes);
443        }
444        // batch.deletesDependent not executed
445    }
446
447    protected void writeCreates(List<Row> creates) {
448        // reorganize by table
449        Map<String, List<Row>> tableRows = new LinkedHashMap<String, List<Row>>();
450        // hierarchy table first because there are foreign keys to it
451        tableRows.put(Model.HIER_TABLE_NAME, new LinkedList<Row>());
452        for (Row row : creates) {
453            List<Row> rows = tableRows.get(row.tableName);
454            if (rows == null) {
455                tableRows.put(row.tableName, rows = new LinkedList<Row>());
456            }
457            rows.add(row);
458        }
459        // inserts on each table
460        for (Entry<String, List<Row>> en : tableRows.entrySet()) {
461            String tableName = en.getKey();
462            List<Row> rows = en.getValue();
463            if (model.isCollectionFragment(tableName)) {
464                List<RowUpdate> rowus = rows.stream().map(RowUpdate::new).collect(Collectors.toList());
465                insertCollectionRows(tableName, rowus);
466            } else {
467                insertSimpleRows(tableName, rows);
468            }
469        }
470    }
471
472    protected void writeUpdates(Set<RowUpdate> updates) {
473        // reorganize by table
474        Map<String, List<RowUpdate>> tableRows = new HashMap<String, List<RowUpdate>>();
475        for (RowUpdate rowu : updates) {
476            List<RowUpdate> rows = tableRows.get(rowu.row.tableName);
477            if (rows == null) {
478                tableRows.put(rowu.row.tableName, rows = new LinkedList<RowUpdate>());
479            }
480            rows.add(rowu);
481        }
482        // updates on each table
483        for (Entry<String, List<RowUpdate>> en : tableRows.entrySet()) {
484            String tableName = en.getKey();
485            List<RowUpdate> rows = en.getValue();
486            if (model.isCollectionFragment(tableName)) {
487                updateCollectionRows(tableName, rows);
488            } else {
489                updateSimpleRows(tableName, rows);
490            }
491        }
492    }
493
494    protected void writeDeletes(Collection<RowId> deletes) {
495        // reorganize by table
496        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
497        for (RowId rowId : deletes) {
498            Set<Serializable> ids = tableIds.get(rowId.tableName);
499            if (ids == null) {
500                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
501            }
502            ids.add(rowId.id);
503        }
504        // delete on each table
505        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
506            String tableName = en.getKey();
507            Set<Serializable> ids = en.getValue();
508            deleteRows(tableName, ids);
509        }
510    }
511
512    /**
513     * Inserts multiple rows, all for the same table.
514     */
515    protected void insertSimpleRows(String tableName, List<Row> rows) {
516        if (rows.isEmpty()) {
517            return;
518        }
519        String sql = sqlInfo.getInsertSql(tableName);
520        if (sql == null) {
521            throw new NuxeoException("Unknown table: " + tableName);
522        }
523        boolean batched = supportsBatchUpdates && rows.size() > 1;
524        String loggedSql = batched ? sql + " -- BATCHED" : sql;
525        List<Column> columns = sqlInfo.getInsertColumns(tableName);
526        try (PreparedStatement ps = connection.prepareStatement(sql)) {
527            int batch = 0;
528            for (Iterator<Row> rowIt = rows.iterator(); rowIt.hasNext();) {
529                Row row = rowIt.next();
530                if (logger.isLogEnabled()) {
531                    logger.logSQL(loggedSql, columns, row);
532                }
533                int i = 1;
534                for (Column column : columns) {
535                    column.setToPreparedStatement(ps, i++, row.get(column.getKey()));
536                }
537                if (batched) {
538                    ps.addBatch();
539                    batch++;
540                    if (batch % UPDATE_BATCH_SIZE == 0 || !rowIt.hasNext()) {
541                        ps.executeBatch();
542                        countExecute();
543                    }
544                } else {
545                    ps.execute();
546                    countExecute();
547                }
548            }
549        } catch (SQLException e) {
550            if (e instanceof BatchUpdateException) {
551                BatchUpdateException bue = (BatchUpdateException) e;
552                if (e.getCause() == null && bue.getNextException() != null) {
553                    // provide a readable cause in the stack trace
554                    e.initCause(bue.getNextException());
555                }
556            }
557            checkConcurrentUpdate(e);
558            throw new NuxeoException("Could not insert: " + sql, e);
559        }
560    }
561
    /**
     * Inserts multiple collection rows, all for the same table.
     */
565    protected void insertCollectionRows(String tableName, List<RowUpdate> rowus) {
566        if (rowus.isEmpty()) {
567            return;
568        }
569        String sql = sqlInfo.getInsertSql(tableName);
570        List<Column> columns = sqlInfo.getInsertColumns(tableName);
571        CollectionIO io = getCollectionIO(tableName);
572        try (PreparedStatement ps = connection.prepareStatement(sql)) {
573            io.executeInserts(ps, rowus, columns, supportsBatchUpdates, sql, this);
574        } catch (SQLException e) {
575            throw new NuxeoException("Could not insert: " + sql, e);
576        }
577    }
578
579    /**
580     * Updates multiple simple rows, all for the same table.
581     */
582    protected void updateSimpleRows(String tableName, List<RowUpdate> rows) {
583        if (rows.isEmpty()) {
584            return;
585        }
586
587        // reorganize by identical queries to allow batching
588        Map<String, SQLInfoSelect> sqlToInfo = new HashMap<>();
589        Map<String, List<RowUpdate>> sqlRowUpdates = new HashMap<>();
590        for (RowUpdate rowu : rows) {
591            SQLInfoSelect update = sqlInfo.getUpdateById(tableName, rowu);
592            String sql = update.sql;
593            sqlToInfo.put(sql, update);
594            sqlRowUpdates.computeIfAbsent(sql, k -> new ArrayList<RowUpdate>()).add(rowu);
595        }
596
597        for (Entry<String, List<RowUpdate>> en : sqlRowUpdates.entrySet()) {
598            String sql = en.getKey();
599            List<RowUpdate> rowUpdates = en.getValue();
600            SQLInfoSelect update = sqlToInfo.get(sql);
601            boolean batched = supportsBatchUpdates && rowUpdates.size() > 1;
602            String loggedSql = batched ? update.sql + " -- BATCHED" : update.sql;
603            try (PreparedStatement ps = connection.prepareStatement(update.sql)) {
604                int batch = 0;
605                for (Iterator<RowUpdate> rowIt = rowUpdates.iterator(); rowIt.hasNext();) {
606                    RowUpdate rowu = rowIt.next();
607                    if (logger.isLogEnabled()) {
608                        logger.logSQL(loggedSql, update.whatColumns, rowu.row, update.whereColumns, rowu.conditions);
609                    }
610                    int i = 1;
611                    for (Column column : update.whatColumns) {
612                        Serializable value = rowu.row.get(column.getKey());
613                        if (value instanceof Delta) {
614                            value = ((Delta) value).getDeltaValue();
615                        }
616                        column.setToPreparedStatement(ps, i++, value);
617                    }
618                    for (Column column : update.whereColumns) {
619                        // id or condition
620                        String key = column.getKey();
621                        Serializable value;
622                        if (key.equals(Model.MAIN_KEY)) {
623                            value = rowu.row.get(key);
624                        } else {
625                            value = rowu.conditions.get(key);
626                        }
627                        column.setToPreparedStatement(ps, i++, value);
628                    }
629                    if (batched) {
630                        ps.addBatch();
631                        batch++;
632                        if (batch % UPDATE_BATCH_SIZE == 0 || !rowIt.hasNext()) {
633                            int[] counts = ps.executeBatch();
634                            countExecute();
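                            // a count != 1 means the UPDATE matched no row; with change tokens enabled this is
                            // treated as a concurrent modification (or deletion) of the row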
635                            for (int j = 0; j < counts.length; j++) {
636                                if (counts[j] != 1) {
637                                    if (model.getRepositoryDescriptor().isChangeTokenEnabled()) {
638                                        Serializable id = rowUpdates.get(j).row.id;
639                                        logger.log("  -> CONCURRENT UPDATE: " + id);
640                                        throw new ConcurrentUpdateException(id.toString());
641                                    }
642                                }
644                            }
645                        }
646                    } else {
647                        int count = ps.executeUpdate();
648                        countExecute();
649                        if (count != 1) {
650                            if (model.getRepositoryDescriptor().isChangeTokenEnabled()) {
651                                Serializable id = rowu.row.id;
652                                logger.log("  -> CONCURRENT UPDATE: " + id);
653                                throw new ConcurrentUpdateException(id.toString());
654                            }
655                        }
656                    }
657                }
658            } catch (SQLException e) {
659                checkConcurrentUpdate(e);
660                throw new NuxeoException("Could not update: " + update.sql, e);
661            }
662        }
663    }
664
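    /**
     * Updates multiple collection rows, all for the same table.
     * <p>
     * Existing rows are deleted first when doing a full update (pos == -1) or when delete-before-append is configured,
     * then the values are (re-)inserted through the table's {@link CollectionIO}.
     */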
665    protected void updateCollectionRows(String tableName, List<RowUpdate> rowus) {
666        Set<Serializable> deleteIds = new HashSet<>();
667        for (RowUpdate rowu : rowus) {
668            if (rowu.pos == -1 || collectionDeleteBeforeAppend) {
669                deleteIds.add(rowu.row.id);
670            }
671        }
672        deleteRows(tableName, deleteIds);
673        insertCollectionRows(tableName, rowus);
674    }
675
676    /**
677     * Deletes multiple rows, all for the same table.
678     */
679    protected void deleteRows(String tableName, Set<Serializable> ids) {
680        if (ids.isEmpty()) {
681            return;
682        }
683        int size = ids.size();
684        int chunkSize = sqlInfo.getMaximumArgsForIn();
685        if (size > chunkSize) {
686            List<Serializable> idList = new ArrayList<Serializable>(ids);
687            for (int start = 0; start < size; start += chunkSize) {
688                int end = start + chunkSize;
689                if (end > size) {
690                    end = size;
691                }
692                // needs to be Serializable -> copy
693                List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
694                deleteRowsDirect(tableName, chunkIds);
695            }
696        } else {
697            deleteRowsDirect(tableName, ids);
698        }
699    }
700
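    /**
     * Soft-deletes the given nodes, in chunks, by marking them as deleted with the current time instead of removing
     * the rows.
     */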
701    protected void deleteRowsSoft(List<NodeInfo> nodeInfos) {
702        try {
703            int size = nodeInfos.size();
704            List<Serializable> ids = new ArrayList<Serializable>(size);
705            for (NodeInfo info : nodeInfos) {
706                ids.add(info.id);
707            }
708            int chunkSize = 100; // max size of ids array
709            if (size <= chunkSize) {
710                doSoftDeleteRows(ids);
711            } else {
712                for (int start = 0; start < size;) {
713                    int end = start + chunkSize;
714                    if (end > size) {
715                        end = size;
716                    }
717                    doSoftDeleteRows(ids.subList(start, end));
718                    start = end;
719                }
720            }
721        } catch (SQLException e) {
722            throw new NuxeoException("Could not soft delete", e);
723        }
724    }
725
726    // not chunked
727    protected void doSoftDeleteRows(List<Serializable> ids) throws SQLException {
728        Serializable whereIds = newIdArray(ids);
729        Calendar now = Calendar.getInstance();
730        String sql = sqlInfo.getSoftDeleteSql();
731        if (logger.isLogEnabled()) {
732            logger.logSQL(sql, Arrays.asList(whereIds, now));
733        }
734        try (PreparedStatement ps = connection.prepareStatement(sql)) {
735            setToPreparedStatementIdArray(ps, 1, whereIds);
736            dialect.setToPreparedStatementTimestamp(ps, 2, now, null);
737            ps.execute();
738            countExecute();
739            return;
740        }
741    }
742
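    /**
     * Builds the id array passed to the soft-delete statement: a native array when the dialect supports arrays,
     * otherwise a '|'-separated string.
     */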
743    protected Serializable newIdArray(Collection<Serializable> ids) {
744        if (dialect.supportsArrays()) {
745            return ids.toArray(); // Object[]
746        } else {
747            // join with '|'
748            StringBuilder b = new StringBuilder();
749            for (Serializable id : ids) {
750                b.append(id);
751                b.append('|');
752            }
753            b.setLength(b.length() - 1);
754            return b.toString();
755        }
756    }
757
758    protected void setToPreparedStatementIdArray(PreparedStatement ps, int index, Serializable idArray)
759            throws SQLException {
760        if (idArray instanceof String) {
761            ps.setString(index, (String) idArray);
762        } else {
763            Array array = dialect.createArrayOf(Types.OTHER, (Object[]) idArray, connection);
764            ps.setArray(index, array);
765        }
766    }
767
    /**
     * Cleans up soft-deleted rows.
     * <p>
     * Rows deleted more recently than the beforeTime are left alone. Only a limited number of rows may be deleted, to
     * prevent the transaction from lasting too long.
     *
     * @param max the maximum number of rows to delete at a time
     * @param beforeTime the maximum deletion time of the rows to delete
     * @return the number of rows deleted
     */
778    public int cleanupDeletedRows(int max, Calendar beforeTime) {
779        if (max < 0) {
780            max = 0;
781        }
782        String sql = sqlInfo.getSoftDeleteCleanupSql();
783        if (logger.isLogEnabled()) {
784            logger.logSQL(sql, Arrays.<Serializable> asList(beforeTime, Long.valueOf(max)));
785        }
786        try {
787            if (sql.startsWith("{")) {
788                // callable statement
789                boolean outFirst = sql.startsWith("{?=");
790                int outIndex = outFirst ? 1 : 3;
791                int inIndex = outFirst ? 2 : 1;
792                try (CallableStatement cs = connection.prepareCall(sql)) {
793                    cs.setInt(inIndex, max);
794                    dialect.setToPreparedStatementTimestamp(cs, inIndex + 1, beforeTime, null);
795                    cs.registerOutParameter(outIndex, Types.INTEGER);
796                    cs.execute();
797                    int count = cs.getInt(outIndex);
798                    logger.logCount(count);
799                    return count;
800                }
801            } else {
802                // standard prepared statement with result set
803                try (PreparedStatement ps = connection.prepareStatement(sql)) {
804                    ps.setInt(1, max);
805                    dialect.setToPreparedStatementTimestamp(ps, 2, beforeTime, null);
806                    try (ResultSet rs = ps.executeQuery()) {
807                        countExecute();
808                        if (!rs.next()) {
809                            throw new NuxeoException("Cannot get result");
810                        }
811                        int count = rs.getInt(1);
812                        logger.logCount(count);
813                        return count;
814                    }
815                }
816            }
817        } catch (SQLException e) {
818            throw new NuxeoException("Could not purge soft delete", e);
819        }
820    }
821
822    protected void deleteRowsDirect(String tableName, Collection<Serializable> ids) {
823        String sql = sqlInfo.getDeleteSql(tableName, ids.size());
824        if (logger.isLogEnabled()) {
825            logger.logSQL(sql, ids);
826        }
827        try (PreparedStatement ps = connection.prepareStatement(sql)) {
828            int i = 1;
829            for (Serializable id : ids) {
830                dialect.setId(ps, i++, id);
831            }
832            int count = ps.executeUpdate();
833            countExecute();
834            logger.logCount(count);
835        } catch (SQLException e) {
836            checkConcurrentUpdate(e);
837            throw new NuxeoException("Could not delete: " + tableName, e);
838        }
839    }
840
841    @Override
842    public Row readSimpleRow(RowId rowId) {
843        SQLInfoSelect select = sqlInfo.selectFragmentById.get(rowId.tableName);
844        Map<String, Serializable> criteriaMap = Collections.singletonMap(Model.MAIN_KEY, rowId.id);
845        List<Row> maps = getSelectRows(rowId.tableName, select, criteriaMap, null, true);
846        return maps.isEmpty() ? null : maps.get(0);
847    }
848
849    @Override
850    public Map<String, String> getBinaryFulltext(RowId rowId) {
851        ArrayList<String> columns = new ArrayList<String>();
852        for (String index : model.getFulltextConfiguration().indexesAllBinary) {
853            String col = Model.FULLTEXT_BINARYTEXT_KEY + model.getFulltextIndexSuffix(index);
854            columns.add(col);
855        }
856        Serializable id = rowId.id;
857        Map<String, String> ret = new HashMap<String, String>(columns.size());
858        String sql = dialect.getBinaryFulltextSql(columns);
859        if (sql == null) {
860            logger.info("getBinaryFulltextSql not supported for dialect " + dialect);
861            return ret;
862        }
863        if (logger.isLogEnabled()) {
864            logger.logSQL(sql, Collections.singletonList(id));
865        }
866        try (PreparedStatement ps = connection.prepareStatement(sql)) {
867            dialect.setId(ps, 1, id);
868            try (ResultSet rs = ps.executeQuery()) {
869                while (rs.next()) {
870                    for (int i = 1; i <= columns.size(); i++) {
871                        ret.put(columns.get(i - 1), rs.getString(i));
872                    }
873                }
874                if (logger.isLogEnabled()) {
875                    logger.log("  -> " + ret);
876                }
877            }
878            return ret;
879        } catch (SQLException e) {
880            throw new NuxeoException("Could not select: " + sql, e);
881        }
882    }
883
884    @Override
885    public Serializable[] readCollectionRowArray(RowId rowId) {
886        String tableName = rowId.tableName;
887        Serializable id = rowId.id;
888        String sql = sqlInfo.selectFragmentById.get(tableName).sql;
889        if (logger.isLogEnabled()) {
890            logger.logSQL(sql, Collections.singletonList(id));
891        }
892        try (PreparedStatement ps = connection.prepareStatement(sql)) {
893            List<Column> columns = sqlInfo.selectFragmentById.get(tableName).whatColumns;
894            dialect.setId(ps, 1, id); // assumes only one primary column
895            try (ResultSet rs = ps.executeQuery()) {
896                countExecute();
897
898                // construct the resulting collection using each row
899                CollectionIO io = getCollectionIO(tableName);
900                List<Serializable> list = new ArrayList<Serializable>();
901                Serializable[] returnId = new Serializable[1];
902                int[] returnPos = { -1 };
903                while (rs.next()) {
904                    list.add(io.getCurrentFromResultSet(rs, columns, model, returnId, returnPos));
905                }
906                PropertyType type = model.getCollectionFragmentType(tableName).getArrayBaseType();
907                Serializable[] array = type.collectionToArray(list);
908
909                if (logger.isLogEnabled()) {
910                    logger.log("  -> " + Arrays.asList(array));
911                }
912                return array;
913            }
914        } catch (SQLException e) {
915            throw new NuxeoException("Could not select: " + sql, e);
916        }
917    }
918
919    @Override
920    public List<Row> readSelectionRows(SelectionType selType, Serializable selId, Serializable filter,
921            Serializable criterion, boolean limitToOne) {
922        SQLInfoSelection selInfo = sqlInfo.getSelection(selType);
923        Map<String, Serializable> criteriaMap = new HashMap<String, Serializable>();
924        criteriaMap.put(selType.selKey, selId);
925        SQLInfoSelect select;
926        if (filter == null) {
927            select = selInfo.selectAll;
928        } else {
929            select = selInfo.selectFiltered;
930            criteriaMap.put(selType.filterKey, filter);
931        }
932        if (selType.criterionKey != null) {
933            criteriaMap.put(selType.criterionKey, criterion);
934        }
935        return getSelectRows(selType.tableName, select, criteriaMap, null, limitToOne);
936    }
937
938    @Override
939    public CopyResult copy(IdWithTypes source, Serializable destParentId, String destName, Row overwriteRow) {
940        // assert !model.separateMainTable; // other case not implemented
941        Invalidations invalidations = new Invalidations();
942        try {
943            Map<Serializable, Serializable> idMap = new LinkedHashMap<Serializable, Serializable>();
944            Map<Serializable, IdWithTypes> idToTypes = new HashMap<Serializable, IdWithTypes>();
945            // copy the hierarchy fragments recursively
946            Serializable overwriteId = overwriteRow == null ? null : overwriteRow.id;
947            if (overwriteId != null) {
948                // overwrite hier root with explicit values
949                String tableName = Model.HIER_TABLE_NAME;
950                updateSimpleRowWithValues(tableName, overwriteRow);
951                idMap.put(source.id, overwriteId);
952                // invalidate
953                invalidations.addModified(new RowId(tableName, overwriteId));
954            }
955            // create the new hierarchy by copy
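            // a copy into the tree (destParentId != null) resets the version info, a checkin (null parent) keeps it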
956            boolean resetVersion = destParentId != null;
957            Serializable newRootId = copyHierRecursive(source, destParentId, destName, overwriteId, resetVersion,
958                    idMap, idToTypes);
959            // invalidate children
960            Serializable invalParentId = overwriteId == null ? destParentId : overwriteId;
961            if (invalParentId != null) { // null for a new version
962                invalidations.addModified(new RowId(Invalidations.PARENT, invalParentId));
963            }
964            // copy all collected fragments
965            Set<Serializable> proxyIds = new HashSet<Serializable>();
966            for (Entry<String, Set<Serializable>> entry : model.getPerFragmentIds(idToTypes).entrySet()) {
967                String tableName = entry.getKey();
968                if (tableName.equals(Model.HIER_TABLE_NAME)) {
969                    // already done
970                    continue;
971                }
972                if (tableName.equals(Model.VERSION_TABLE_NAME)) {
973                    // versions not fileable
974                    // restore must not copy versions either
975                    continue;
976                }
977                Set<Serializable> ids = entry.getValue();
978                if (tableName.equals(Model.PROXY_TABLE_NAME)) {
979                    for (Serializable id : ids) {
980                        proxyIds.add(idMap.get(id)); // copied ids
981                    }
982                }
983                Boolean invalidation = copyRows(tableName, ids, idMap, overwriteId);
984                if (invalidation != null) {
985                    // overwrote something
986                    // make sure things are properly invalidated in this and
987                    // other sessions
988                    if (Boolean.TRUE.equals(invalidation)) {
989                        invalidations.addModified(new RowId(tableName, overwriteId));
990                    } else {
991                        invalidations.addDeleted(new RowId(tableName, overwriteId));
992                    }
993                }
994            }
995            return new CopyResult(newRootId, invalidations, proxyIds);
996        } catch (SQLException e) {
997            throw new NuxeoException("Could not copy: " + source.id.toString(), e);
998        }
999    }
1000
1001    /**
1002     * Updates a row in the database with given explicit values.
1003     */
1004    protected void updateSimpleRowWithValues(String tableName, Row row) {
1005        Update update = sqlInfo.getUpdateByIdForKeys(tableName, row.getKeys());
1006        Table table = update.getTable();
1007        String sql = update.getStatement();
1008        try (PreparedStatement ps = connection.prepareStatement(sql)) {
1009            if (logger.isLogEnabled()) {
1010                List<Serializable> values = new LinkedList<Serializable>();
1011                values.addAll(row.getValues());
1012                values.add(row.id); // id last in SQL
1013                logger.logSQL(sql, values);
1014            }
1015            int i = 1;
1016            List<String> keys = row.getKeys();
1017            List<Serializable> values = row.getValues();
1018            int size = keys.size();
1019            for (int r = 0; r < size; r++) {
1020                String key = keys.get(r);
1021                Serializable value = values.get(r);
1022                table.getColumn(key).setToPreparedStatement(ps, i++, value);
1023            }
1024            dialect.setId(ps, i, row.id); // id last in SQL
1025            int count = ps.executeUpdate();
1026            countExecute();
1027        } catch (SQLException e) {
1028            throw new NuxeoException("Could not update: " + sql, e);
1029        }
1030    }
1031
    /**
     * Copies hierarchy from id to parentId, and recurses.
     * <p>
     * If name is {@code null}, then the original name is kept.
     * <p>
     * {@code idMap} is filled with info about the correspondence between original and copied ids. {@code idToTypes}
     * is filled with the type of each (source) fragment.
     * <p>
     * TODO: this should be optimized to use a stored procedure.
     *
     * @param overwriteId when not {@code null}, the copy is done onto this existing node, whose own copy is skipped
     * @return the new root id
     */
1045    protected Serializable copyHierRecursive(IdWithTypes source, Serializable parentId, String name,
1046            Serializable overwriteId, boolean resetVersion, Map<Serializable, Serializable> idMap,
1047            Map<Serializable, IdWithTypes> idToTypes) throws SQLException {
1048        idToTypes.put(source.id, source);
1049        Serializable newId;
1050        if (overwriteId == null) {
1051            newId = copyHier(source.id, parentId, name, resetVersion, idMap);
1052        } else {
1053            newId = overwriteId;
1054            idMap.put(source.id, newId);
1055        }
1056        // recurse in children
1057        boolean onlyComplex = parentId == null;
1058        for (IdWithTypes child : getChildrenIdsWithTypes(source.id, onlyComplex)) {
1059            copyHierRecursive(child, newId, null, null, resetVersion, idMap, idToTypes);
1060        }
1061        return newId;
1062    }
1063
    /**
     * Copies hierarchy from id to a new child of parentId.
     * <p>
     * If name is {@code null}, then the original name is kept.
     * <p>
     * {@code idMap} is filled with info about the correspondence between original and copied ids.
     *
     * @return the new id
     */
1074    protected Serializable copyHier(Serializable id, Serializable parentId, String name, boolean resetVersion,
1075            Map<Serializable, Serializable> idMap) throws SQLException {
1076        boolean explicitName = name != null;
1077
1078        SQLInfoSelect copy = sqlInfo.getCopyHier(explicitName, resetVersion);
1079        try (PreparedStatement ps = connection.prepareStatement(copy.sql)) {
1080            Serializable newId = generateNewId();
1081
1082            List<Serializable> debugValues = null;
1083            if (logger.isLogEnabled()) {
1084                debugValues = new ArrayList<Serializable>(4);
1085            }
1086            int i = 1;
1087            for (Column column : copy.whatColumns) {
1088                String key = column.getKey();
1089                Serializable v;
1090                if (key.equals(Model.HIER_PARENT_KEY)) {
1091                    v = parentId;
1092                } else if (key.equals(Model.HIER_CHILD_NAME_KEY)) {
                    // present if name explicitly set (first iteration)
1094                    v = name;
1095                } else if (key.equals(Model.MAIN_KEY)) {
1096                    // present if APP_UUID generation
1097                    v = newId;
1098                } else if (key.equals(Model.MAIN_BASE_VERSION_KEY) || key.equals(Model.MAIN_CHECKED_IN_KEY)) {
1099                    v = null;
1100                } else if (key.equals(Model.MAIN_MINOR_VERSION_KEY) || key.equals(Model.MAIN_MAJOR_VERSION_KEY)) {
1101                    // present if reset version (regular copy, not checkin)
1102                    v = null;
1103                } else {
1104                    throw new RuntimeException(column.toString());
1105                }
1106                column.setToPreparedStatement(ps, i++, v);
1107                if (debugValues != null) {
1108                    debugValues.add(v);
1109                }
1110            }
1111            // last parameter is for 'WHERE "id" = ?'
1112            Column whereColumn = copy.whereColumns.get(0);
1113            whereColumn.setToPreparedStatement(ps, i, id);
1114            if (debugValues != null) {
1115                debugValues.add(id);
1116                logger.logSQL(copy.sql, debugValues);
1117            }
1118            int count = ps.executeUpdate();
1119            countExecute();
1120
1121            // TODO DB_IDENTITY
1122            // post insert fetch idrow
1123
1124            idMap.put(id, newId);
1125            return newId;
1126        }
1127    }
1128
1129    /**
1130     * Gets the children ids and types of a node.
1131     */
1132    protected List<IdWithTypes> getChildrenIdsWithTypes(Serializable id, boolean onlyComplex) throws SQLException {
1133        List<IdWithTypes> children = new LinkedList<IdWithTypes>();
1134        String sql = sqlInfo.getSelectChildrenIdsAndTypesSql(onlyComplex);
1135        if (logger.isLogEnabled()) {
1136            logger.logSQL(sql, Collections.singletonList(id));
1137        }
1138        List<Column> columns = sqlInfo.getSelectChildrenIdsAndTypesWhatColumns();
1139        try (PreparedStatement ps = connection.prepareStatement(sql)) {
1140            List<String> debugValues = null;
1141            if (logger.isLogEnabled()) {
1142                debugValues = new LinkedList<String>();
1143            }
1144            dialect.setId(ps, 1, id); // parent id
1145            try (ResultSet rs = ps.executeQuery()) {
1146                countExecute();
1147                while (rs.next()) {
1148                    Serializable childId = null;
1149                    String childPrimaryType = null;
1150                    String[] childMixinTypes = null;
1151                    int i = 1;
1152                    for (Column column : columns) {
1153                        String key = column.getKey();
1154                        Serializable value = column.getFromResultSet(rs, i++);
1155                        if (key.equals(Model.MAIN_KEY)) {
1156                            childId = value;
1157                        } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
1158                            childPrimaryType = (String) value;
1159                        } else if (key.equals(Model.MAIN_MIXIN_TYPES_KEY)) {
1160                            childMixinTypes = (String[]) value;
1161                        }
1162                    }
1163                    children.add(new IdWithTypes(childId, childPrimaryType, childMixinTypes));
1164                    if (debugValues != null) {
1165                        debugValues.add(childId + "/" + childPrimaryType + "/" + Arrays.toString(childMixinTypes));
1166                    }
1167                }
1168            }
1169            if (debugValues != null) {
1170                logger.log("  -> " + debugValues);
1171            }
1172            return children;
1173        }
1174    }
1175
    /**
     * Copies the rows from tableName with given ids into new ones with new ids given by idMap.
     * <p>
     * Any existing row with id {@code overwriteId} is first deleted.
     *
     * @return {@link Boolean#TRUE} for a modification or creation, {@link Boolean#FALSE} for a deletion, {@code null}
     *         otherwise (still absent)
     * @throws SQLException
     */
1185    protected Boolean copyRows(String tableName, Set<Serializable> ids, Map<Serializable, Serializable> idMap,
1186            Serializable overwriteId) throws SQLException {
1187        String copySql = sqlInfo.getCopySql(tableName);
1188        Column copyIdColumn = sqlInfo.getCopyIdColumn(tableName);
1189        String deleteSql = sqlInfo.getDeleteSql(tableName);
1190        try (PreparedStatement copyPs = connection.prepareStatement(copySql);
1191                PreparedStatement deletePs = connection.prepareStatement(deleteSql)) {
1192            boolean before = false;
1193            boolean after = false;
1194            for (Serializable id : ids) {
1195                Serializable newId = idMap.get(id);
1196                boolean overwrite = newId.equals(overwriteId);
1197                if (overwrite) {
1198                    // remove existing first
1199                    if (logger.isLogEnabled()) {
1200                        logger.logSQL(deleteSql, Collections.singletonList(newId));
1201                    }
1202                    dialect.setId(deletePs, 1, newId);
1203                    int delCount = deletePs.executeUpdate();
1204                    countExecute();
1205                    before = delCount > 0;
1206                }
1207                copyIdColumn.setToPreparedStatement(copyPs, 1, newId);
1208                copyIdColumn.setToPreparedStatement(copyPs, 2, id);
1209                if (logger.isLogEnabled()) {
1210                    logger.logSQL(copySql, Arrays.asList(newId, id));
1211                }
1212                int copyCount = copyPs.executeUpdate();
1213                countExecute();
1214                if (overwrite) {
1215                    after = copyCount > 0;
1216                }
1217            }
            // summarize what happened to the overwritten row:
            //   something copied                -> modification (TRUE)
            //   deleted only, nothing copied    -> deletion (FALSE)
            //   nothing deleted, nothing copied -> still absent (null)
1221            return after ? Boolean.TRUE : (before ? Boolean.FALSE : null);
1222        }
1223    }
1224
1225    @Override
1226    public List<NodeInfo> remove(NodeInfo rootInfo) {
1227        Serializable rootId = rootInfo.id;
1228        List<NodeInfo> info = getDescendantsInfo(rootId);
1229        info.add(rootInfo);
1230        if (sqlInfo.softDeleteEnabled) {
1231            deleteRowsSoft(info);
1232        } else {
1233            deleteRowsDirect(Model.HIER_TABLE_NAME, Collections.singleton(rootId));
1234        }
1235        return info;
1236    }
1237
1238    protected List<NodeInfo> getDescendantsInfo(Serializable rootId) {
1239        if (!dialect.supportsFastDescendants()) {
1240            return getDescendantsInfoIterative(rootId);
1241        }
1242        List<NodeInfo> descendants = new LinkedList<NodeInfo>();
1243        String sql = sqlInfo.getSelectDescendantsInfoSql();
1244        if (logger.isLogEnabled()) {
1245            logger.logSQL(sql, Collections.singletonList(rootId));
1246        }
1247        List<Column> columns = sqlInfo.getSelectDescendantsInfoWhatColumns();
1248        try (PreparedStatement ps = connection.prepareStatement(sql)) {
1249            List<String> debugValues = null;
1250            if (logger.isLogEnabled()) {
1251                debugValues = new LinkedList<String>();
1252            }
1253            dialect.setId(ps, 1, rootId); // parent id
1254            try (ResultSet rs = ps.executeQuery()) {
1255                countExecute();
1256                while (rs.next()) {
1257                    Serializable id = null;
1258                    Serializable parentId = null;
1259                    String primaryType = null;
1260                    Boolean isProperty = null;
1261                    Serializable targetId = null;
1262                    Serializable versionableId = null;
1263                    int i = 1;
1264                    for (Column column : columns) {
1265                        String key = column.getKey();
1266                        Serializable value = column.getFromResultSet(rs, i++);
1267                        if (key.equals(Model.MAIN_KEY)) {
1268                            id = value;
1269                        } else if (key.equals(Model.HIER_PARENT_KEY)) {
1270                            parentId = value;
1271                        } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
1272                            primaryType = (String) value;
1273                        } else if (key.equals(Model.HIER_CHILD_ISPROPERTY_KEY)) {
1274                            isProperty = (Boolean) value;
1275                        } else if (key.equals(Model.PROXY_TARGET_KEY)) {
1276                            targetId = value;
1277                        } else if (key.equals(Model.PROXY_VERSIONABLE_KEY)) {
1278                            versionableId = value;
1279                        }
1280                        // no mixins (not useful to caller)
1281                        // no versions (not fileable)
1282                    }
1283                    descendants.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
1284                    if (debugValues != null) {
1285                        if (debugValues.size() < DEBUG_MAX_TREE) {
1286                            debugValues.add(id + "/" + primaryType);
1287                        }
1288                    }
1289                }
1290            }
1291            if (debugValues != null) {
1292                if (debugValues.size() >= DEBUG_MAX_TREE) {
1293                    debugValues.add("... (" + descendants.size() + ") results");
1294                }
1295                logger.log("  -> " + debugValues);
1296            }
1297            return descendants;
1298        } catch (SQLException e) {
1299            throw new NuxeoException("Failed to get descendants", e);
1300        }
1301    }
1302
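    /**
     * Gets the descendants info by iteratively fetching the children of the ids found so far, for dialects that
     * cannot fetch all descendants in a single query.
     */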
1303    protected List<NodeInfo> getDescendantsInfoIterative(Serializable rootId) {
1304        Set<Serializable> done = new HashSet<>();
1305        List<Serializable> todo = new ArrayList<>(Collections.singleton(rootId));
1306        List<NodeInfo> descendants = new ArrayList<NodeInfo>();
1307        while (!todo.isEmpty()) {
1308            List<NodeInfo> infos;
1309            int size = todo.size();
1310            int chunkSize = sqlInfo.getMaximumArgsForIn();
1311            if (size > chunkSize) {
1312                infos = new ArrayList<>();
1313                for (int start = 0; start < size; start += chunkSize) {
1314                    int end = start + chunkSize;
1315                    if (end > size) {
1316                        end = size;
1317                    }
1318                    // needs to be Serializable -> copy
1319                    List<Serializable> chunkTodo = new ArrayList<Serializable>(todo.subList(start, end));
1320                    List<NodeInfo> chunkInfos = getChildrenNodeInfos(chunkTodo);
1321                    infos.addAll(chunkInfos);
1322                }
1323            } else {
1324                infos = getChildrenNodeInfos(todo);
1325            }
1326            todo = new ArrayList<>();
1327            for (NodeInfo info : infos) {
1328                Serializable id = info.id;
1329                if (!done.add(id)) {
1330                    continue;
1331                }
1332                todo.add(id);
1333                descendants.add(info);
1334            }
1335        }
1336        return descendants;
1337    }
1338
1339    /**
1340     * Gets the children of a node as a list of NodeInfo.
1341     */
1342    protected List<NodeInfo> getChildrenNodeInfos(Collection<Serializable> ids) {
1343        List<NodeInfo> children = new LinkedList<NodeInfo>();
1344        SQLInfoSelect select = sqlInfo.getSelectChildrenNodeInfos(ids.size());
1345        if (logger.isLogEnabled()) {
1346            logger.logSQL(select.sql, ids);
1347        }
1348        Column where = select.whereColumns.get(0);
1349        try (PreparedStatement ps = connection.prepareStatement(select.sql)) {
1350            List<String> debugValues = null;
1351            if (logger.isLogEnabled()) {
1352                debugValues = new LinkedList<String>();
1353            }
1354            int ii = 1;
1355            for (Serializable id : ids) {
1356                where.setToPreparedStatement(ps, ii++, id);
1357            }
            try (ResultSet rs = ps.executeQuery()) {
1360                countExecute();
1361                while (rs.next()) {
1362                    Serializable id = null;
1363                    Serializable parentId = null;
1364                    String primaryType = null;
1365                    Boolean isProperty = Boolean.FALSE;
1366                    Serializable targetId = null;
1367                    Serializable versionableId = null;
1368                    int i = 1;
1369                    for (Column column : select.whatColumns) {
1370                        String key = column.getKey();
1371                        Serializable value = column.getFromResultSet(rs, i++);
1372                        if (key.equals(Model.MAIN_KEY)) {
1373                            id = value;
1374                        } else if (key.equals(Model.HIER_PARENT_KEY)) {
1375                            parentId = value;
1376                        } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
1377                            primaryType = (String) value;
1378                        } else if (key.equals(Model.PROXY_TARGET_KEY)) {
1379                            targetId = value;
1380                        } else if (key.equals(Model.PROXY_VERSIONABLE_KEY)) {
1381                            versionableId = value;
1382                        }
1383                    }
1384                    children.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
1385                    if (debugValues != null) {
1386                        if (debugValues.size() < DEBUG_MAX_TREE) {
1387                            debugValues.add(id + "/" + primaryType);
1388                        }
1389                    }
1390                }
1391            }
1392            if (debugValues != null) {
1393                if (debugValues.size() >= DEBUG_MAX_TREE) {
1394                    debugValues.add("... (" + children.size() + ") results");
1395                }
1396                logger.log("  -> " + debugValues);
1397            }
1398            return children;
1399        } catch (SQLException e) {
            throw new NuxeoException("Failed to get children", e);
1401        }
1402    }
1403
1404}