001/*
002 * (C) Copyright 2006-2016 Nuxeo SA (http://nuxeo.com/) and others.
003 *
004 * Licensed under the Apache License, Version 2.0 (the "License");
005 * you may not use this file except in compliance with the License.
006 * You may obtain a copy of the License at
007 *
008 *     http://www.apache.org/licenses/LICENSE-2.0
009 *
010 * Unless required by applicable law or agreed to in writing, software
011 * distributed under the License is distributed on an "AS IS" BASIS,
012 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
013 * See the License for the specific language governing permissions and
014 * limitations under the License.
015 *
016 * Contributors:
017 *     Florent Guillaume
018 */
019package org.nuxeo.ecm.core.storage.sql.jdbc;
020
021import java.io.Serializable;
022import java.sql.Array;
023import java.sql.BatchUpdateException;
024import java.sql.CallableStatement;
025import java.sql.PreparedStatement;
026import java.sql.ResultSet;
027import java.sql.SQLException;
028import java.sql.Types;
029import java.util.ArrayList;
030import java.util.Arrays;
031import java.util.Calendar;
032import java.util.Collection;
033import java.util.Collections;
034import java.util.HashMap;
035import java.util.HashSet;
036import java.util.Iterator;
037import java.util.LinkedHashMap;
038import java.util.LinkedList;
039import java.util.List;
040import java.util.ListIterator;
041import java.util.Map;
042import java.util.Map.Entry;
043import java.util.Set;
044import java.util.stream.Collectors;
045
046import javax.transaction.xa.XAException;
047import javax.transaction.xa.Xid;
048
049import org.apache.commons.lang.StringUtils;
050import org.nuxeo.ecm.core.api.ConcurrentUpdateException;
051import org.nuxeo.ecm.core.api.NuxeoException;
052import org.nuxeo.ecm.core.api.model.Delta;
053import org.nuxeo.ecm.core.storage.sql.ClusterInvalidator;
054import org.nuxeo.ecm.core.storage.sql.Invalidations;
055import org.nuxeo.ecm.core.storage.sql.InvalidationsPropagator;
056import org.nuxeo.ecm.core.storage.sql.Model;
057import org.nuxeo.ecm.core.storage.sql.PropertyType;
058import org.nuxeo.ecm.core.storage.sql.Row;
059import org.nuxeo.ecm.core.storage.sql.RowId;
060import org.nuxeo.ecm.core.storage.sql.RowMapper;
061import org.nuxeo.ecm.core.storage.sql.SelectionType;
062import org.nuxeo.ecm.core.storage.sql.SimpleFragment;
063import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelect;
064import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelection;
065import org.nuxeo.ecm.core.storage.sql.jdbc.db.Column;
066import org.nuxeo.ecm.core.storage.sql.jdbc.db.Table;
067import org.nuxeo.ecm.core.storage.sql.jdbc.db.Update;
068import org.nuxeo.runtime.api.Framework;
069import org.nuxeo.runtime.services.config.ConfigurationService;
070
071/**
072 * A {@link JDBCRowMapper} maps {@link Row}s to and from a JDBC database.
073 */
074public class JDBCRowMapper extends JDBCConnection implements RowMapper {
075
076    public static final int UPDATE_BATCH_SIZE = 100; // also insert/delete
077
078    public static final int DEBUG_MAX_TREE = 50;
079
    /** Property controlling whether appending to a collection deletes all existing rows and re-inserts them, or is optimized to insert only the appended values. */
081    public static final String COLLECTION_DELETE_BEFORE_APPEND_PROP = "org.nuxeo.vcs.list-delete-before-append";
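    // This flag is read once at construction time through the ConfigurationService. As a sketch, assuming the
    // standard nuxeo.conf framework-properties mechanism, it would be enabled with:
    //   org.nuxeo.vcs.list-delete-before-append=true
    // which makes every collection write delete the existing rows before re-inserting the full list.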
082
083    /**
     * Cluster invalidator, or {@code null} if this mapper does not participate in invalidation propagation (as is the
     * case for the mappers used internally by the cluster invalidator and by the lock manager).
086     */
087    private final ClusterInvalidator clusterInvalidator;
088
089    private final InvalidationsPropagator invalidationsPropagator;
090
091    private final boolean collectionDeleteBeforeAppend;
092
093    private final CollectionIO aclCollectionIO;
094
095    private final CollectionIO scalarCollectionIO;
096
097    public JDBCRowMapper(Model model, SQLInfo sqlInfo, ClusterInvalidator clusterInvalidator,
098            InvalidationsPropagator invalidationsPropagator) {
099        super(model, sqlInfo);
100        this.clusterInvalidator = clusterInvalidator;
101        this.invalidationsPropagator = invalidationsPropagator;
102        ConfigurationService configurationService = Framework.getService(ConfigurationService.class);
103        collectionDeleteBeforeAppend = configurationService.isBooleanPropertyTrue(COLLECTION_DELETE_BEFORE_APPEND_PROP);
104        aclCollectionIO = new ACLCollectionIO(collectionDeleteBeforeAppend);
105        scalarCollectionIO = new ScalarCollectionIO(collectionDeleteBeforeAppend);
106    }
107
108    @Override
109    public Invalidations receiveInvalidations() {
110        if (clusterInvalidator != null) {
111            Invalidations invalidations = clusterInvalidator.receiveInvalidations();
112            // send received invalidations to all mappers
113            if (invalidations != null && !invalidations.isEmpty()) {
114                invalidationsPropagator.propagateInvalidations(invalidations, null);
115            }
116            return invalidations;
117        } else {
118            return null;
119        }
120    }
121
123    @Override
124    public void sendInvalidations(Invalidations invalidations) {
125        if (clusterInvalidator != null) {
126            clusterInvalidator.sendInvalidations(invalidations);
127        }
128    }
129
130    @Override
131    public void clearCache() {
132        // no cache
133    }
134
135    @Override
136    public long getCacheSize() {
137        return 0;
138    }
139
140    @Override
141    public void rollback(Xid xid) throws XAException {
142        try {
143            xaresource.rollback(xid);
144        } catch (XAException e) {
145            logger.error("XA error on rollback: " + e);
146            throw e;
147        }
148    }
149
150    protected CollectionIO getCollectionIO(String tableName) {
151        return tableName.equals(Model.ACL_TABLE_NAME) ? aclCollectionIO : scalarCollectionIO;
152    }
153
154    @Override
155    public Serializable generateNewId() {
156        try {
157            return dialect.getGeneratedId(connection);
158        } catch (SQLException e) {
159            throw new NuxeoException(e);
160        }
161    }
162
163    /*
164     * ----- RowIO -----
165     */
166
167    @Override
168    public List<? extends RowId> read(Collection<RowId> rowIds, boolean cacheOnly) {
169        List<RowId> res = new ArrayList<RowId>(rowIds.size());
170        if (cacheOnly) {
171            // return no data
172            for (RowId rowId : rowIds) {
173                res.add(new RowId(rowId));
174            }
175            return res;
176        }
177        // reorganize by table
178        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
179        for (RowId rowId : rowIds) {
180            Set<Serializable> ids = tableIds.get(rowId.tableName);
181            if (ids == null) {
182                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
183            }
184            ids.add(rowId.id);
185        }
186        // read on each table
187        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
188            String tableName = en.getKey();
189            Set<Serializable> ids = new HashSet<Serializable>(en.getValue());
190            int size = ids.size();
191            int chunkSize = sqlInfo.getMaximumArgsForIn();
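            // The ids are queried in chunks so that no single SELECT ... IN (...) exceeds the dialect's maximum
            // argument count; for example 250 ids with a chunk size of 100 yield three queries covering the index
            // ranges [0,100), [100,200) and [200,250).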
192            List<Row> rows;
193            if (size > chunkSize) {
194                List<Serializable> idList = new ArrayList<Serializable>(ids);
195                rows = new ArrayList<Row>(size);
196                for (int start = 0; start < size; start += chunkSize) {
197                    int end = start + chunkSize;
198                    if (end > size) {
199                        end = size;
200                    }
201                    // needs to be Serializable -> copy
202                    List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
203                    List<Row> chunkRows;
204                    if (model.isCollectionFragment(tableName)) {
205                        chunkRows = readCollectionArrays(tableName, chunkIds);
206                    } else {
207                        chunkRows = readSimpleRows(tableName, chunkIds);
208                    }
209                    rows.addAll(chunkRows);
210                }
211            } else {
212                if (model.isCollectionFragment(tableName)) {
213                    rows = readCollectionArrays(tableName, ids);
214                } else {
215                    rows = readSimpleRows(tableName, ids);
216                }
217            }
            // check that we have all the ids (readSimpleRows may have some missing)
220            for (Row row : rows) {
221                res.add(row);
222                ids.remove(row.id);
223            }
224            // for the missing ids record an empty RowId
225            for (Serializable id : ids) {
226                res.add(new RowId(tableName, id));
227            }
228        }
229        return res;
230    }
231
232    /**
233     * Gets a list of rows for {@link SimpleFragment}s from the database, given the table name and the ids.
234     *
235     * @param tableName the table name
236     * @param ids the ids
237     * @return the list of rows, without the missing ones
238     */
239    protected List<Row> readSimpleRows(String tableName, Collection<Serializable> ids) {
240        if (ids.isEmpty()) {
241            return Collections.emptyList();
242        }
243        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size());
244        Map<String, Serializable> criteriaMap = Collections.singletonMap(Model.MAIN_KEY, (Serializable) ids);
245        return getSelectRows(tableName, select, criteriaMap, null, false);
246    }
247
248    /**
249     * Reads several collection rows, given a table name and the ids.
250     *
251     * @param tableName the table name
252     * @param ids the ids
253     */
254    protected List<Row> readCollectionArrays(String tableName, Collection<Serializable> ids) {
255        if (ids.isEmpty()) {
256            return Collections.emptyList();
257        }
        String[] orderBys = { Model.MAIN_KEY, Model.COLL_TABLE_POS_KEY }; // ordering clusters the results by id
260        Set<String> skipColumns = new HashSet<String>(Arrays.asList(Model.COLL_TABLE_POS_KEY));
261        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size(), orderBys, skipColumns);
262
263        String sql = select.sql;
264        if (logger.isLogEnabled()) {
265            logger.logSQL(sql, ids);
266        }
267        try (PreparedStatement ps = connection.prepareStatement(sql)) {
268            int i = 1;
269            for (Serializable id : ids) {
270                dialect.setId(ps, i++, id);
271            }
272            try (ResultSet rs = ps.executeQuery()) {
273                countExecute();
274
275                // get all values from result set, separate by ids
276                // the result set is ordered by id, pos
277                CollectionIO io = getCollectionIO(tableName);
278                PropertyType ftype = model.getCollectionFragmentType(tableName);
279                PropertyType type = ftype.getArrayBaseType();
280                Serializable curId = null;
281                List<Serializable> list = null;
282                Serializable[] returnId = new Serializable[1];
283                int[] returnPos = { -1 };
284                List<Row> res = new LinkedList<Row>();
285                Set<Serializable> remainingIds = new HashSet<Serializable>(ids);
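                // Fold the flat (id, pos, value) rows into one Row per id: for example the ordered rows
                // (id1, 0, "a"), (id1, 1, "b"), (id2, 0, "c") become Row(id1, ["a", "b"]) and Row(id2, ["c"]).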
286                while (rs.next()) {
287                    Serializable value = io.getCurrentFromResultSet(rs, select.whatColumns, model, returnId, returnPos);
288                    Serializable newId = returnId[0];
289                    if (newId != null && !newId.equals(curId)) {
290                        // flush old list
291                        if (list != null) {
292                            res.add(new Row(tableName, curId, type.collectionToArray(list)));
293                            remainingIds.remove(curId);
294                        }
295                        curId = newId;
296                        list = new ArrayList<Serializable>();
297                    }
298                    list.add(value);
299                }
300                if (curId != null && list != null) {
301                    // flush last list
302                    res.add(new Row(tableName, curId, type.collectionToArray(list)));
303                    remainingIds.remove(curId);
304                }
305
306                // fill empty ones
307                if (!remainingIds.isEmpty()) {
308                    Serializable[] emptyArray = ftype.getEmptyArray();
309                    for (Serializable id : remainingIds) {
310                        res.add(new Row(tableName, id, emptyArray));
311                    }
312                }
313                if (logger.isLogEnabled()) {
314                    for (Row row : res) {
315                        logger.log("  -> " + row);
316                    }
317                }
318                return res;
319            }
320        } catch (SQLException e) {
321            throw new NuxeoException("Could not select: " + sql, e);
322        }
323    }
324
325    /**
     * Fetches the rows for a select with fixed criteria given as two maps (a criteriaMap whose values end up in the
     * returned rows, and a joinMap for other criteria).
328     */
329    protected List<Row> getSelectRows(String tableName, SQLInfoSelect select, Map<String, Serializable> criteriaMap,
330            Map<String, Serializable> joinMap, boolean limitToOne) {
331        List<Row> list = new LinkedList<Row>();
332        if (select.whatColumns.isEmpty()) {
333            // happens when we fetch a fragment whose columns are all opaque
334            // check it's a by-id query
335            if (select.whereColumns.size() == 1 && select.whereColumns.get(0).getKey() == Model.MAIN_KEY
336                    && joinMap == null) {
337                Row row = new Row(tableName, criteriaMap);
338                if (select.opaqueColumns != null) {
339                    for (Column column : select.opaqueColumns) {
340                        row.putNew(column.getKey(), Row.OPAQUE);
341                    }
342                }
343                list.add(row);
344                return list;
345            }
346            // else do a useless select but the criteria are more complex and we
347            // can't shortcut
348        }
349        if (joinMap == null) {
350            joinMap = Collections.emptyMap();
351        }
352        try (PreparedStatement ps = connection.prepareStatement(select.sql)) {
353
354            /*
355             * Compute where part.
356             */
357            List<Serializable> debugValues = null;
358            if (logger.isLogEnabled()) {
359                debugValues = new LinkedList<Serializable>();
360            }
361            int i = 1;
362            for (Column column : select.whereColumns) {
363                String key = column.getKey();
364                Serializable v;
365                if (criteriaMap.containsKey(key)) {
366                    v = criteriaMap.get(key);
367                } else if (joinMap.containsKey(key)) {
368                    v = joinMap.get(key);
369                } else {
370                    throw new RuntimeException(key);
371                }
372                if (v == null) {
373                    throw new NuxeoException("Null value for key: " + key);
374                }
375                if (v instanceof Collection<?>) {
376                    // allow insert of several values, for the IN (...) case
377                    for (Object vv : (Collection<?>) v) {
378                        column.setToPreparedStatement(ps, i++, (Serializable) vv);
379                        if (debugValues != null) {
380                            debugValues.add((Serializable) vv);
381                        }
382                    }
383                } else {
384                    column.setToPreparedStatement(ps, i++, v);
385                    if (debugValues != null) {
386                        debugValues.add(v);
387                    }
388                }
389            }
390            if (debugValues != null) {
391                logger.logSQL(select.sql, debugValues);
392            }
393
394            /*
395             * Execute query.
396             */
397            try (ResultSet rs = ps.executeQuery()) {
398                countExecute();
399
400                /*
401                 * Construct the maps from the result set.
402                 */
403                while (rs.next()) {
404                    Row row = new Row(tableName, criteriaMap);
405                    i = 1;
406                    for (Column column : select.whatColumns) {
407                        row.put(column.getKey(), column.getFromResultSet(rs, i++));
408                    }
409                    if (select.opaqueColumns != null) {
410                        for (Column column : select.opaqueColumns) {
411                            row.putNew(column.getKey(), Row.OPAQUE);
412                        }
413                    }
414                    if (logger.isLogEnabled()) {
415                        logger.logResultSet(rs, select.whatColumns);
416                    }
417                    list.add(row);
418                    if (limitToOne) {
419                        return list;
420                    }
421                }
422            }
423            if (limitToOne) {
424                return Collections.emptyList();
425            }
426            return list;
427        } catch (SQLException e) {
428            checkConcurrentUpdate(e);
429            throw new NuxeoException("Could not select: " + select.sql, e);
430        }
431    }
432
433    @Override
434    public void write(RowBatch batch) {
435        if (!batch.creates.isEmpty()) {
436            writeCreates(batch.creates);
437        }
438        if (!batch.updates.isEmpty()) {
439            writeUpdates(batch.updates);
440        }
441        if (!batch.deletes.isEmpty()) {
442            writeDeletes(batch.deletes);
443        }
444        // batch.deletesDependent not executed
445    }
446
447    protected void writeCreates(List<Row> creates) {
448        // reorganize by table
449        Map<String, List<Row>> tableRows = new LinkedHashMap<String, List<Row>>();
450        // hierarchy table first because there are foreign keys to it
451        tableRows.put(Model.HIER_TABLE_NAME, new LinkedList<Row>());
452        for (Row row : creates) {
453            List<Row> rows = tableRows.get(row.tableName);
454            if (rows == null) {
455                tableRows.put(row.tableName, rows = new LinkedList<Row>());
456            }
457            rows.add(row);
458        }
459        // inserts on each table
460        for (Entry<String, List<Row>> en : tableRows.entrySet()) {
461            String tableName = en.getKey();
462            List<Row> rows = en.getValue();
463            if (model.isCollectionFragment(tableName)) {
464                List<RowUpdate> rowus = rows.stream().map(RowUpdate::new).collect(Collectors.toList());
465                insertCollectionRows(tableName, rowus);
466            } else {
467                insertSimpleRows(tableName, rows);
468            }
469        }
470    }
471
472    protected void writeUpdates(Set<RowUpdate> updates) {
473        // reorganize by table
474        Map<String, List<RowUpdate>> tableRows = new HashMap<String, List<RowUpdate>>();
475        for (RowUpdate rowu : updates) {
476            List<RowUpdate> rows = tableRows.get(rowu.row.tableName);
477            if (rows == null) {
478                tableRows.put(rowu.row.tableName, rows = new LinkedList<RowUpdate>());
479            }
480            rows.add(rowu);
481        }
482        // updates on each table
483        for (Entry<String, List<RowUpdate>> en : tableRows.entrySet()) {
484            String tableName = en.getKey();
485            List<RowUpdate> rows = en.getValue();
486            if (model.isCollectionFragment(tableName)) {
487                updateCollectionRows(tableName, rows);
488            } else {
489                updateSimpleRows(tableName, rows);
490            }
491        }
492    }
493
494    protected void writeDeletes(Collection<RowId> deletes) {
495        // reorganize by table
496        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
497        for (RowId rowId : deletes) {
498            Set<Serializable> ids = tableIds.get(rowId.tableName);
499            if (ids == null) {
500                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
501            }
502            ids.add(rowId.id);
503        }
504        // delete on each table
505        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
506            String tableName = en.getKey();
507            Set<Serializable> ids = en.getValue();
508            deleteRows(tableName, ids);
509        }
510    }
511
512    /**
513     * Inserts multiple rows, all for the same table.
514     */
515    protected void insertSimpleRows(String tableName, List<Row> rows) {
516        if (rows.isEmpty()) {
517            return;
518        }
519        String sql = sqlInfo.getInsertSql(tableName);
520        if (sql == null) {
521            throw new NuxeoException("Unknown table: " + tableName);
522        }
523        boolean batched = supportsBatchUpdates && rows.size() > 1;
524        String loggedSql = batched ? sql + " -- BATCHED" : sql;
525        List<Column> columns = sqlInfo.getInsertColumns(tableName);
526        try (PreparedStatement ps = connection.prepareStatement(sql)) {
527            int batch = 0;
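            // With batching available, the statement is flushed every UPDATE_BATCH_SIZE rows and once more at the
            // end; for example 250 rows with a batch size of 100 trigger executeBatch() after rows 100, 200 and 250.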
528            for (Iterator<Row> rowIt = rows.iterator(); rowIt.hasNext();) {
529                Row row = rowIt.next();
530                if (logger.isLogEnabled()) {
531                    logger.logSQL(loggedSql, columns, row);
532                }
533                int i = 1;
534                for (Column column : columns) {
535                    column.setToPreparedStatement(ps, i++, row.get(column.getKey()));
536                }
537                if (batched) {
538                    ps.addBatch();
539                    batch++;
540                    if (batch % UPDATE_BATCH_SIZE == 0 || !rowIt.hasNext()) {
541                        ps.executeBatch();
542                        countExecute();
543                    }
544                } else {
545                    ps.execute();
546                    countExecute();
547                }
548            }
549        } catch (SQLException e) {
550            if (e instanceof BatchUpdateException) {
551                BatchUpdateException bue = (BatchUpdateException) e;
552                if (e.getCause() == null && bue.getNextException() != null) {
553                    // provide a readable cause in the stack trace
554                    e.initCause(bue.getNextException());
555                }
556            }
557            checkConcurrentUpdate(e);
558            throw new NuxeoException("Could not insert: " + sql, e);
559        }
560    }
561
562    /**
     * Inserts multiple collection rows, all for the same table.
564     */
565    protected void insertCollectionRows(String tableName, List<RowUpdate> rowus) {
566        if (rowus.isEmpty()) {
567            return;
568        }
569        String sql = sqlInfo.getInsertSql(tableName);
570        List<Column> columns = sqlInfo.getInsertColumns(tableName);
571        CollectionIO io = getCollectionIO(tableName);
572        try (PreparedStatement ps = connection.prepareStatement(sql)) {
573            io.executeInserts(ps, rowus, columns, supportsBatchUpdates, sql, this);
574        } catch (SQLException e) {
575            throw new NuxeoException("Could not insert: " + sql, e);
576        }
577    }
578
579    /**
580     * Updates multiple simple rows, all for the same table.
581     */
582    protected void updateSimpleRows(String tableName, List<RowUpdate> rows) {
583        if (rows.isEmpty()) {
584            return;
585        }
586
587        // reorganize by identical queries to allow batching
588        Map<String, SQLInfoSelect> sqlToInfo = new HashMap<>();
589        Map<String, List<RowUpdate>> sqlRowUpdates = new HashMap<>();
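        // Updates are grouped by their generated SQL text (typically differing when they touch different column sets
        // or conditions), so that each group can share a single PreparedStatement and be executed as a JDBC batch.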
590        for (RowUpdate rowu : rows) {
591            SQLInfoSelect update = sqlInfo.getUpdateById(tableName, rowu);
592            String sql = update.sql;
593            sqlToInfo.put(sql, update);
594            sqlRowUpdates.computeIfAbsent(sql, k -> new ArrayList<RowUpdate>()).add(rowu);
595        }
596
597        for (Entry<String, List<RowUpdate>> en : sqlRowUpdates.entrySet()) {
598            String sql = en.getKey();
599            List<RowUpdate> rowUpdates = en.getValue();
600            SQLInfoSelect update = sqlToInfo.get(sql);
601            boolean batched = supportsBatchUpdates && rowUpdates.size() > 1;
602            String loggedSql = batched ? update.sql + " -- BATCHED" : update.sql;
603            try (PreparedStatement ps = connection.prepareStatement(update.sql)) {
604                int batch = 0;
605                for (Iterator<RowUpdate> rowIt = rowUpdates.iterator(); rowIt.hasNext();) {
606                    RowUpdate rowu = rowIt.next();
607                    if (logger.isLogEnabled()) {
608                        logger.logSQL(loggedSql, update.whatColumns, rowu.row, update.whereColumns, rowu.conditions);
609                    }
610                    int i = 1;
611                    for (Column column : update.whatColumns) {
612                        Serializable value = rowu.row.get(column.getKey());
613                        if (value instanceof Delta) {
614                            value = ((Delta) value).getDeltaValue();
615                        }
616                        column.setToPreparedStatement(ps, i++, value);
617                    }
618                    for (Column column : update.whereColumns) {
619                        // id or condition
620                        String key = column.getKey();
621                        Serializable value;
622                        if (key.equals(Model.MAIN_KEY)) {
623                            value = rowu.row.get(key);
624                        } else {
625                            value = rowu.conditions.get(key);
626                        }
627                        column.setToPreparedStatement(ps, i++, value);
628                    }
629                    if (batched) {
630                        ps.addBatch();
631                        batch++;
632                        if (batch % UPDATE_BATCH_SIZE == 0 || !rowIt.hasNext()) {
633                            int[] counts = ps.executeBatch();
634                            countExecute();
635                            for (int j = 0; j < counts.length; j++) {
636                                if (counts[j] != 1) {
637                                    // find which id this is about
638                                    Serializable id = rowUpdates.get(j).row.id;
639                                    logger.log("  -> CONCURRENT UPDATE: " + id);
640                                    throw new ConcurrentUpdateException(id.toString());
641                                }
642
643                            }
644                        }
645                    } else {
646                        int count = ps.executeUpdate();
647                        countExecute();
648                        if (count != 1) {
649                            Serializable id = rowu.row.id;
650                            logger.log("  -> CONCURRENT UPDATE: " + id);
651                            throw new ConcurrentUpdateException(id.toString());
652                        }
653                    }
654                }
655            } catch (SQLException e) {
656                checkConcurrentUpdate(e);
657                throw new NuxeoException("Could not update: " + update.sql, e);
658            }
659        }
660    }
661
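    /**
     * Writes pending updates to multiple collection rows, all for the same table.
     * <p>
     * When {@code pos} is -1, or when {@link #COLLECTION_DELETE_BEFORE_APPEND_PROP} is set, the existing rows for the
     * id are deleted first and the whole collection is re-inserted; otherwise the insert is assumed to only append the
     * new values after the existing ones.
     */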
662    protected void updateCollectionRows(String tableName, List<RowUpdate> rowus) {
663        Set<Serializable> deleteIds = new HashSet<>();
664        for (RowUpdate rowu : rowus) {
665            if (rowu.pos == -1 || collectionDeleteBeforeAppend) {
666                deleteIds.add(rowu.row.id);
667            }
668        }
669        deleteRows(tableName, deleteIds);
670        insertCollectionRows(tableName, rowus);
671    }
672
673    /**
674     * Deletes multiple rows, all for the same table.
675     */
676    protected void deleteRows(String tableName, Set<Serializable> ids) {
677        if (ids.isEmpty()) {
678            return;
679        }
680        int size = ids.size();
681        int chunkSize = sqlInfo.getMaximumArgsForIn();
682        if (size > chunkSize) {
683            List<Serializable> idList = new ArrayList<Serializable>(ids);
684            for (int start = 0; start < size; start += chunkSize) {
685                int end = start + chunkSize;
686                if (end > size) {
687                    end = size;
688                }
689                // needs to be Serializable -> copy
690                List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
691                deleteRowsDirect(tableName, chunkIds);
692            }
693        } else {
694            deleteRowsDirect(tableName, ids);
695        }
696    }
697
698    protected void deleteRowsSoft(List<NodeInfo> nodeInfos) {
699        try {
700            int size = nodeInfos.size();
701            List<Serializable> ids = new ArrayList<Serializable>(size);
702            for (NodeInfo info : nodeInfos) {
703                ids.add(info.id);
704            }
705            int chunkSize = 100; // max size of ids array
706            if (size <= chunkSize) {
707                doSoftDeleteRows(ids);
708            } else {
709                for (int start = 0; start < size;) {
710                    int end = start + chunkSize;
711                    if (end > size) {
712                        end = size;
713                    }
714                    doSoftDeleteRows(ids.subList(start, end));
715                    start = end;
716                }
717            }
718        } catch (SQLException e) {
719            throw new NuxeoException("Could not soft delete", e);
720        }
721    }
722
723    // not chunked
724    protected void doSoftDeleteRows(List<Serializable> ids) throws SQLException {
725        Serializable whereIds = newIdArray(ids);
726        Calendar now = Calendar.getInstance();
727        String sql = sqlInfo.getSoftDeleteSql();
728        if (logger.isLogEnabled()) {
729            logger.logSQL(sql, Arrays.asList(whereIds, now));
730        }
731        try (PreparedStatement ps = connection.prepareStatement(sql)) {
732            setToPreparedStatementIdArray(ps, 1, whereIds);
733            dialect.setToPreparedStatementTimestamp(ps, 2, now, null);
734            ps.execute();
735            countExecute();
736            return;
737        }
738    }
739
740    protected Serializable newIdArray(Collection<Serializable> ids) {
741        if (dialect.supportsArrays()) {
742            return ids.toArray(); // Object[]
743        } else {
744            // join with '|'
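            // e.g. the ids [id1, id2, id3] become the single string "id1|id2|id3"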
745            StringBuilder b = new StringBuilder();
746            for (Serializable id : ids) {
747                b.append(id);
748                b.append('|');
749            }
750            b.setLength(b.length() - 1);
751            return b.toString();
752        }
753    }
754
755    protected void setToPreparedStatementIdArray(PreparedStatement ps, int index, Serializable idArray)
756            throws SQLException {
757        if (idArray instanceof String) {
758            ps.setString(index, (String) idArray);
759        } else {
760            Array array = dialect.createArrayOf(Types.OTHER, (Object[]) idArray, connection);
761            ps.setArray(index, array);
762        }
763    }
764
765    /**
     * Cleans up soft-deleted rows.
     * <p>
     * Rows deleted more recently than {@code beforeTime} are left alone. Only a limited number of rows may be deleted,
     * to keep the transaction from lasting too long.
770     *
771     * @param max the maximum number of rows to delete at a time
772     * @param beforeTime the maximum deletion time of the rows to delete
773     * @return the number of rows deleted
774     */
775    public int cleanupDeletedRows(int max, Calendar beforeTime) {
776        if (max < 0) {
777            max = 0;
778        }
779        String sql = sqlInfo.getSoftDeleteCleanupSql();
780        if (logger.isLogEnabled()) {
781            logger.logSQL(sql, Arrays.<Serializable> asList(beforeTime, Long.valueOf(max)));
782        }
783        try {
784            if (sql.startsWith("{")) {
785                // callable statement
786                boolean outFirst = sql.startsWith("{?=");
787                int outIndex = outFirst ? 1 : 3;
788                int inIndex = outFirst ? 2 : 1;
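                // The dialect-provided SQL is assumed to be either a function form such as "{?= call cleanup(?, ?)}"
                // (OUT parameter first) or a procedure form such as "{call cleanup(?, ?, ?)}" (OUT parameter last);
                // "cleanup" is only an illustrative name, the indices above handle both shapes.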
789                try (CallableStatement cs = connection.prepareCall(sql)) {
790                    cs.setInt(inIndex, max);
791                    dialect.setToPreparedStatementTimestamp(cs, inIndex + 1, beforeTime, null);
792                    cs.registerOutParameter(outIndex, Types.INTEGER);
793                    cs.execute();
794                    int count = cs.getInt(outIndex);
795                    logger.logCount(count);
796                    return count;
797                }
798            } else {
799                // standard prepared statement with result set
800                try (PreparedStatement ps = connection.prepareStatement(sql)) {
801                    ps.setInt(1, max);
802                    dialect.setToPreparedStatementTimestamp(ps, 2, beforeTime, null);
803                    try (ResultSet rs = ps.executeQuery()) {
804                        countExecute();
805                        if (!rs.next()) {
806                            throw new NuxeoException("Cannot get result");
807                        }
808                        int count = rs.getInt(1);
809                        logger.logCount(count);
810                        return count;
811                    }
812                }
813            }
814        } catch (SQLException e) {
815            throw new NuxeoException("Could not purge soft delete", e);
816        }
817    }
818
819    protected void deleteRowsDirect(String tableName, Collection<Serializable> ids) {
820        String sql = sqlInfo.getDeleteSql(tableName, ids.size());
821        if (logger.isLogEnabled()) {
822            logger.logSQL(sql, ids);
823        }
824        try (PreparedStatement ps = connection.prepareStatement(sql)) {
825            int i = 1;
826            for (Serializable id : ids) {
827                dialect.setId(ps, i++, id);
828            }
829            int count = ps.executeUpdate();
830            countExecute();
831            logger.logCount(count);
832        } catch (SQLException e) {
833            checkConcurrentUpdate(e);
834            throw new NuxeoException("Could not delete: " + tableName, e);
835        }
836    }
837
838    @Override
839    public Row readSimpleRow(RowId rowId) {
840        SQLInfoSelect select = sqlInfo.selectFragmentById.get(rowId.tableName);
841        Map<String, Serializable> criteriaMap = Collections.singletonMap(Model.MAIN_KEY, rowId.id);
842        List<Row> maps = getSelectRows(rowId.tableName, select, criteriaMap, null, true);
843        return maps.isEmpty() ? null : maps.get(0);
844    }
845
846    @Override
847    public Map<String, String> getBinaryFulltext(RowId rowId) {
848        ArrayList<String> columns = new ArrayList<String>();
849        for (String index : model.getFulltextConfiguration().indexesAllBinary) {
850            String col = Model.FULLTEXT_BINARYTEXT_KEY + model.getFulltextIndexSuffix(index);
851            columns.add(col);
852        }
853        Serializable id = rowId.id;
854        Map<String, String> ret = new HashMap<String, String>(columns.size());
855        String sql = dialect.getBinaryFulltextSql(columns);
856        if (sql == null) {
857            logger.info("getBinaryFulltextSql not supported for dialect " + dialect);
858            return ret;
859        }
860        if (logger.isLogEnabled()) {
861            logger.logSQL(sql, Collections.singletonList(id));
862        }
863        try (PreparedStatement ps = connection.prepareStatement(sql)) {
864            dialect.setId(ps, 1, id);
865            try (ResultSet rs = ps.executeQuery()) {
866                while (rs.next()) {
867                    for (int i = 1; i <= columns.size(); i++) {
868                        ret.put(columns.get(i - 1), rs.getString(i));
869                    }
870                }
871                if (logger.isLogEnabled()) {
872                    logger.log("  -> " + ret);
873                }
874            }
875            return ret;
876        } catch (SQLException e) {
877            throw new NuxeoException("Could not select: " + sql, e);
878        }
879    }
880
881    @Override
882    public Serializable[] readCollectionRowArray(RowId rowId) {
883        String tableName = rowId.tableName;
884        Serializable id = rowId.id;
885        String sql = sqlInfo.selectFragmentById.get(tableName).sql;
886        if (logger.isLogEnabled()) {
887            logger.logSQL(sql, Collections.singletonList(id));
888        }
889        try (PreparedStatement ps = connection.prepareStatement(sql)) {
890            List<Column> columns = sqlInfo.selectFragmentById.get(tableName).whatColumns;
891            dialect.setId(ps, 1, id); // assumes only one primary column
892            try (ResultSet rs = ps.executeQuery()) {
893                countExecute();
894
895                // construct the resulting collection using each row
896                CollectionIO io = getCollectionIO(tableName);
897                List<Serializable> list = new ArrayList<Serializable>();
898                Serializable[] returnId = new Serializable[1];
899                int[] returnPos = { -1 };
900                while (rs.next()) {
901                    list.add(io.getCurrentFromResultSet(rs, columns, model, returnId, returnPos));
902                }
903                PropertyType type = model.getCollectionFragmentType(tableName).getArrayBaseType();
904                Serializable[] array = type.collectionToArray(list);
905
906                if (logger.isLogEnabled()) {
907                    logger.log("  -> " + Arrays.asList(array));
908                }
909                return array;
910            }
911        } catch (SQLException e) {
912            throw new NuxeoException("Could not select: " + sql, e);
913        }
914    }
915
916    @Override
917    public List<Row> readSelectionRows(SelectionType selType, Serializable selId, Serializable filter,
918            Serializable criterion, boolean limitToOne) {
919        SQLInfoSelection selInfo = sqlInfo.getSelection(selType);
920        Map<String, Serializable> criteriaMap = new HashMap<String, Serializable>();
921        criteriaMap.put(selType.selKey, selId);
922        SQLInfoSelect select;
923        if (filter == null) {
924            select = selInfo.selectAll;
925        } else {
926            select = selInfo.selectFiltered;
927            criteriaMap.put(selType.filterKey, filter);
928        }
929        if (selType.criterionKey != null) {
930            criteriaMap.put(selType.criterionKey, criterion);
931        }
932        return getSelectRows(selType.tableName, select, criteriaMap, null, limitToOne);
933    }
934
935    @Override
936    public CopyResult copy(IdWithTypes source, Serializable destParentId, String destName, Row overwriteRow) {
937        // assert !model.separateMainTable; // other case not implemented
938        Invalidations invalidations = new Invalidations();
939        try {
940            Map<Serializable, Serializable> idMap = new LinkedHashMap<Serializable, Serializable>();
941            Map<Serializable, IdWithTypes> idToTypes = new HashMap<Serializable, IdWithTypes>();
942            // copy the hierarchy fragments recursively
943            Serializable overwriteId = overwriteRow == null ? null : overwriteRow.id;
944            if (overwriteId != null) {
945                // overwrite hier root with explicit values
946                String tableName = Model.HIER_TABLE_NAME;
947                updateSimpleRowWithValues(tableName, overwriteRow);
948                idMap.put(source.id, overwriteId);
949                // invalidate
950                invalidations.addModified(new RowId(tableName, overwriteId));
951            }
952            // create the new hierarchy by copy
953            boolean resetVersion = destParentId != null;
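            // a non-null destination parent means a regular copy, whose version fields are reset;
            // a null parent is the version-creation case, which keeps them (see copyHier)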
954            Serializable newRootId = copyHierRecursive(source, destParentId, destName, overwriteId, resetVersion,
955                    idMap, idToTypes);
956            // invalidate children
957            Serializable invalParentId = overwriteId == null ? destParentId : overwriteId;
958            if (invalParentId != null) { // null for a new version
959                invalidations.addModified(new RowId(Invalidations.PARENT, invalParentId));
960            }
961            // copy all collected fragments
962            Set<Serializable> proxyIds = new HashSet<Serializable>();
963            for (Entry<String, Set<Serializable>> entry : model.getPerFragmentIds(idToTypes).entrySet()) {
964                String tableName = entry.getKey();
965                if (tableName.equals(Model.HIER_TABLE_NAME)) {
966                    // already done
967                    continue;
968                }
969                if (tableName.equals(Model.VERSION_TABLE_NAME)) {
970                    // versions not fileable
971                    // restore must not copy versions either
972                    continue;
973                }
974                Set<Serializable> ids = entry.getValue();
975                if (tableName.equals(Model.PROXY_TABLE_NAME)) {
976                    for (Serializable id : ids) {
977                        proxyIds.add(idMap.get(id)); // copied ids
978                    }
979                }
980                Boolean invalidation = copyRows(tableName, ids, idMap, overwriteId);
981                if (invalidation != null) {
982                    // overwrote something
983                    // make sure things are properly invalidated in this and
984                    // other sessions
985                    if (Boolean.TRUE.equals(invalidation)) {
986                        invalidations.addModified(new RowId(tableName, overwriteId));
987                    } else {
988                        invalidations.addDeleted(new RowId(tableName, overwriteId));
989                    }
990                }
991            }
992            return new CopyResult(newRootId, invalidations, proxyIds);
993        } catch (SQLException e) {
994            throw new NuxeoException("Could not copy: " + source.id.toString(), e);
995        }
996    }
997
998    /**
999     * Updates a row in the database with given explicit values.
1000     */
1001    protected void updateSimpleRowWithValues(String tableName, Row row) {
1002        Update update = sqlInfo.getUpdateByIdForKeys(tableName, row.getKeys());
1003        Table table = update.getTable();
1004        String sql = update.getStatement();
1005        try (PreparedStatement ps = connection.prepareStatement(sql)) {
1006            if (logger.isLogEnabled()) {
1007                List<Serializable> values = new LinkedList<Serializable>();
1008                values.addAll(row.getValues());
1009                values.add(row.id); // id last in SQL
1010                logger.logSQL(sql, values);
1011            }
1012            int i = 1;
1013            List<String> keys = row.getKeys();
1014            List<Serializable> values = row.getValues();
1015            int size = keys.size();
1016            for (int r = 0; r < size; r++) {
1017                String key = keys.get(r);
1018                Serializable value = values.get(r);
1019                table.getColumn(key).setToPreparedStatement(ps, i++, value);
1020            }
1021            dialect.setId(ps, i, row.id); // id last in SQL
1022            int count = ps.executeUpdate();
1023            countExecute();
1024        } catch (SQLException e) {
1025            throw new NuxeoException("Could not update: " + sql, e);
1026        }
1027    }
1028
1029    /**
1030     * Copies hierarchy from id to parentId, and recurses.
1031     * <p>
1032     * If name is {@code null}, then the original name is kept.
1033     * <p>
     * {@code idMap} is filled with info about the correspondence between original and copied ids. {@code idToTypes} is
     * filled with the type of each (source) fragment.
1036     * <p>
1037     * TODO: this should be optimized to use a stored procedure.
1038     *
     * @param overwriteId when not {@code null}, the copy is done onto this existing node (whose own hierarchy row is not copied)
1040     * @return the new root id
1041     */
1042    protected Serializable copyHierRecursive(IdWithTypes source, Serializable parentId, String name,
1043            Serializable overwriteId, boolean resetVersion, Map<Serializable, Serializable> idMap,
1044            Map<Serializable, IdWithTypes> idToTypes) throws SQLException {
1045        idToTypes.put(source.id, source);
1046        Serializable newId;
1047        if (overwriteId == null) {
1048            newId = copyHier(source.id, parentId, name, resetVersion, idMap);
1049        } else {
1050            newId = overwriteId;
1051            idMap.put(source.id, newId);
1052        }
1053        // recurse in children
1054        boolean onlyComplex = parentId == null;
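        // with no destination parent (version-creation case), only complex-property children are copied,
        // not regular document children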
1055        for (IdWithTypes child : getChildrenIdsWithTypes(source.id, onlyComplex)) {
1056            copyHierRecursive(child, newId, null, null, resetVersion, idMap, idToTypes);
1057        }
1058        return newId;
1059    }
1060
1061    /**
1062     * Copies hierarchy from id to a new child of parentId.
1063     * <p>
1064     * If name is {@code null}, then the original name is kept.
1065     * <p>
     * {@code idMap} is filled with info about the correspondence between original and copied ids.
1068     *
1069     * @return the new id
1070     */
1071    protected Serializable copyHier(Serializable id, Serializable parentId, String name, boolean resetVersion,
1072            Map<Serializable, Serializable> idMap) throws SQLException {
1073        boolean explicitName = name != null;
1074
1075        SQLInfoSelect copy = sqlInfo.getCopyHier(explicitName, resetVersion);
1076        try (PreparedStatement ps = connection.prepareStatement(copy.sql)) {
1077            Serializable newId = generateNewId();
1078
1079            List<Serializable> debugValues = null;
1080            if (logger.isLogEnabled()) {
1081                debugValues = new ArrayList<Serializable>(4);
1082            }
1083            int i = 1;
1084            for (Column column : copy.whatColumns) {
1085                String key = column.getKey();
1086                Serializable v;
1087                if (key.equals(Model.HIER_PARENT_KEY)) {
1088                    v = parentId;
1089                } else if (key.equals(Model.HIER_CHILD_NAME_KEY)) {
                    // present if name explicitly set (first iteration)
1091                    v = name;
1092                } else if (key.equals(Model.MAIN_KEY)) {
1093                    // present if APP_UUID generation
1094                    v = newId;
1095                } else if (key.equals(Model.MAIN_BASE_VERSION_KEY) || key.equals(Model.MAIN_CHECKED_IN_KEY)) {
1096                    v = null;
1097                } else if (key.equals(Model.MAIN_MINOR_VERSION_KEY) || key.equals(Model.MAIN_MAJOR_VERSION_KEY)) {
1098                    // present if reset version (regular copy, not checkin)
1099                    v = null;
1100                } else {
1101                    throw new RuntimeException(column.toString());
1102                }
1103                column.setToPreparedStatement(ps, i++, v);
1104                if (debugValues != null) {
1105                    debugValues.add(v);
1106                }
1107            }
1108            // last parameter is for 'WHERE "id" = ?'
1109            Column whereColumn = copy.whereColumns.get(0);
1110            whereColumn.setToPreparedStatement(ps, i, id);
1111            if (debugValues != null) {
1112                debugValues.add(id);
1113                logger.logSQL(copy.sql, debugValues);
1114            }
1115            int count = ps.executeUpdate();
1116            countExecute();
1117
1118            // TODO DB_IDENTITY
1119            // post insert fetch idrow
1120
1121            idMap.put(id, newId);
1122            return newId;
1123        }
1124    }
1125
1126    /**
1127     * Gets the children ids and types of a node.
1128     */
1129    protected List<IdWithTypes> getChildrenIdsWithTypes(Serializable id, boolean onlyComplex) throws SQLException {
1130        List<IdWithTypes> children = new LinkedList<IdWithTypes>();
1131        String sql = sqlInfo.getSelectChildrenIdsAndTypesSql(onlyComplex);
1132        if (logger.isLogEnabled()) {
1133            logger.logSQL(sql, Collections.singletonList(id));
1134        }
1135        List<Column> columns = sqlInfo.getSelectChildrenIdsAndTypesWhatColumns();
1136        try (PreparedStatement ps = connection.prepareStatement(sql)) {
1137            List<String> debugValues = null;
1138            if (logger.isLogEnabled()) {
1139                debugValues = new LinkedList<String>();
1140            }
1141            dialect.setId(ps, 1, id); // parent id
1142            try (ResultSet rs = ps.executeQuery()) {
1143                countExecute();
1144                while (rs.next()) {
1145                    Serializable childId = null;
1146                    String childPrimaryType = null;
1147                    String[] childMixinTypes = null;
1148                    int i = 1;
1149                    for (Column column : columns) {
1150                        String key = column.getKey();
1151                        Serializable value = column.getFromResultSet(rs, i++);
1152                        if (key.equals(Model.MAIN_KEY)) {
1153                            childId = value;
1154                        } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
1155                            childPrimaryType = (String) value;
1156                        } else if (key.equals(Model.MAIN_MIXIN_TYPES_KEY)) {
1157                            childMixinTypes = (String[]) value;
1158                        }
1159                    }
1160                    children.add(new IdWithTypes(childId, childPrimaryType, childMixinTypes));
1161                    if (debugValues != null) {
1162                        debugValues.add(childId + "/" + childPrimaryType + "/" + Arrays.toString(childMixinTypes));
1163                    }
1164                }
1165            }
1166            if (debugValues != null) {
1167                logger.log("  -> " + debugValues);
1168            }
1169            return children;
1170        }
1171    }
1172
1173    /**
     * Copies the rows from tableName with given ids into new ones with new ids given by idMap.
     * <p>
     * Any existing row with id {@code overwriteId} is first deleted.
1177     *
1178     * @return {@link Boolean#TRUE} for a modification or creation, {@link Boolean#FALSE} for a deletion, {@code null}
1179     *         otherwise (still absent)
1180     * @throws SQLException
1181     */
1182    protected Boolean copyRows(String tableName, Set<Serializable> ids, Map<Serializable, Serializable> idMap,
1183            Serializable overwriteId) throws SQLException {
1184        String copySql = sqlInfo.getCopySql(tableName);
1185        Column copyIdColumn = sqlInfo.getCopyIdColumn(tableName);
1186        String deleteSql = sqlInfo.getDeleteSql(tableName);
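        // The copy statement is expected to be of the form:
        //   INSERT INTO <table> (id, cols...) SELECT ?, cols... FROM <table> WHERE id = ?
        // which is why the new id is bound first and the source id second below.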
1187        try (PreparedStatement copyPs = connection.prepareStatement(copySql);
1188                PreparedStatement deletePs = connection.prepareStatement(deleteSql)) {
1189            boolean before = false;
1190            boolean after = false;
1191            for (Serializable id : ids) {
1192                Serializable newId = idMap.get(id);
1193                boolean overwrite = newId.equals(overwriteId);
1194                if (overwrite) {
1195                    // remove existing first
1196                    if (logger.isLogEnabled()) {
1197                        logger.logSQL(deleteSql, Collections.singletonList(newId));
1198                    }
1199                    dialect.setId(deletePs, 1, newId);
1200                    int delCount = deletePs.executeUpdate();
1201                    countExecute();
1202                    before = delCount > 0;
1203                }
1204                copyIdColumn.setToPreparedStatement(copyPs, 1, newId);
1205                copyIdColumn.setToPreparedStatement(copyPs, 2, id);
1206                if (logger.isLogEnabled()) {
1207                    logger.logSQL(copySql, Arrays.asList(newId, id));
1208                }
1209                int copyCount = copyPs.executeUpdate();
1210                countExecute();
1211                if (overwrite) {
1212                    after = copyCount > 0;
1213                }
1214            }
            // (before, after) counts of the overwritten row:
            // (*, >0) -> modified (TRUE)
            // (>0, 0) -> deleted (FALSE)
            // (0, 0)  -> still absent (null)
1218            return after ? Boolean.TRUE : (before ? Boolean.FALSE : null);
1219        }
1220    }
1221
1222    @Override
1223    public List<NodeInfo> remove(NodeInfo rootInfo) {
1224        Serializable rootId = rootInfo.id;
1225        List<NodeInfo> info = getDescendantsInfo(rootId);
1226        info.add(rootInfo);
1227        if (sqlInfo.softDeleteEnabled) {
1228            deleteRowsSoft(info);
1229        } else {
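            // hard delete: only the root hierarchy row is deleted here; descendants and rows in the other fragment
            // tables are expected to be removed by the ON DELETE CASCADE foreign keys referencing the hierarchy table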
1230            deleteRowsDirect(Model.HIER_TABLE_NAME, Collections.singleton(rootId));
1231        }
1232        return info;
1233    }
1234
1235    protected List<NodeInfo> getDescendantsInfo(Serializable rootId) {
1236        if (!dialect.supportsFastDescendants()) {
1237            return getDescendantsInfoIterative(rootId);
1238        }
1239        List<NodeInfo> descendants = new LinkedList<NodeInfo>();
1240        String sql = sqlInfo.getSelectDescendantsInfoSql();
1241        if (logger.isLogEnabled()) {
1242            logger.logSQL(sql, Collections.singletonList(rootId));
1243        }
1244        List<Column> columns = sqlInfo.getSelectDescendantsInfoWhatColumns();
1245        try (PreparedStatement ps = connection.prepareStatement(sql)) {
1246            List<String> debugValues = null;
1247            if (logger.isLogEnabled()) {
1248                debugValues = new LinkedList<String>();
1249            }
1250            dialect.setId(ps, 1, rootId); // parent id
1251            try (ResultSet rs = ps.executeQuery()) {
1252                countExecute();
1253                while (rs.next()) {
1254                    Serializable id = null;
1255                    Serializable parentId = null;
1256                    String primaryType = null;
1257                    Boolean isProperty = null;
1258                    Serializable targetId = null;
1259                    Serializable versionableId = null;
1260                    int i = 1;
1261                    for (Column column : columns) {
1262                        String key = column.getKey();
1263                        Serializable value = column.getFromResultSet(rs, i++);
1264                        if (key.equals(Model.MAIN_KEY)) {
1265                            id = value;
1266                        } else if (key.equals(Model.HIER_PARENT_KEY)) {
1267                            parentId = value;
1268                        } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
1269                            primaryType = (String) value;
1270                        } else if (key.equals(Model.HIER_CHILD_ISPROPERTY_KEY)) {
1271                            isProperty = (Boolean) value;
1272                        } else if (key.equals(Model.PROXY_TARGET_KEY)) {
1273                            targetId = value;
1274                        } else if (key.equals(Model.PROXY_VERSIONABLE_KEY)) {
1275                            versionableId = value;
1276                        }
1277                        // no mixins (not useful to caller)
1278                        // no versions (not fileable)
1279                    }
1280                    descendants.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
1281                    if (debugValues != null) {
1282                        if (debugValues.size() < DEBUG_MAX_TREE) {
1283                            debugValues.add(id + "/" + primaryType);
1284                        }
1285                    }
1286                }
1287            }
1288            if (debugValues != null) {
1289                if (debugValues.size() >= DEBUG_MAX_TREE) {
1290                    debugValues.add("... (" + descendants.size() + ") results");
1291                }
1292                logger.log("  -> " + debugValues);
1293            }
1294            return descendants;
1295        } catch (SQLException e) {
1296            throw new NuxeoException("Failed to get descendants", e);
1297        }
1298    }
1299
1300    protected List<NodeInfo> getDescendantsInfoIterative(Serializable rootId) {
1301        Set<Serializable> done = new HashSet<>();
1302        List<Serializable> todo = new ArrayList<>(Collections.singleton(rootId));
1303        List<NodeInfo> descendants = new ArrayList<NodeInfo>();
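        // Breadth-first traversal: each pass fetches the children of all the ids discovered in the previous pass
        // (chunked to respect the dialect's IN limit); the done set guards against processing a node twice.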
1304        while (!todo.isEmpty()) {
1305            List<NodeInfo> infos;
1306            int size = todo.size();
1307            int chunkSize = sqlInfo.getMaximumArgsForIn();
1308            if (size > chunkSize) {
1309                infos = new ArrayList<>();
1310                for (int start = 0; start < size; start += chunkSize) {
1311                    int end = start + chunkSize;
1312                    if (end > size) {
1313                        end = size;
1314                    }
1315                    // needs to be Serializable -> copy
1316                    List<Serializable> chunkTodo = new ArrayList<Serializable>(todo.subList(start, end));
1317                    List<NodeInfo> chunkInfos = getChildrenNodeInfos(chunkTodo);
1318                    infos.addAll(chunkInfos);
1319                }
1320            } else {
1321                infos = getChildrenNodeInfos(todo);
1322            }
1323            todo = new ArrayList<>();
1324            for (NodeInfo info : infos) {
1325                Serializable id = info.id;
1326                if (!done.add(id)) {
1327                    continue;
1328                }
1329                todo.add(id);
1330                descendants.add(info);
1331            }
1332        }
1333        return descendants;
1334    }
1335
1336    /**
     * Gets the children of the given nodes as a list of NodeInfo.
1338     */
1339    protected List<NodeInfo> getChildrenNodeInfos(Collection<Serializable> ids) {
1340        List<NodeInfo> children = new LinkedList<NodeInfo>();
1341        SQLInfoSelect select = sqlInfo.getSelectChildrenNodeInfos(ids.size());
1342        if (logger.isLogEnabled()) {
1343            logger.logSQL(select.sql, ids);
1344        }
1345        Column where = select.whereColumns.get(0);
1346        try (PreparedStatement ps = connection.prepareStatement(select.sql)) {
1347            List<String> debugValues = null;
1348            if (logger.isLogEnabled()) {
1349                debugValues = new LinkedList<String>();
1350            }
1351            int ii = 1;
1352            for (Serializable id : ids) {
1353                where.setToPreparedStatement(ps, ii++, id);
1354            }
            try (ResultSet rs = ps.executeQuery()) {
1357                countExecute();
1358                while (rs.next()) {
1359                    Serializable id = null;
1360                    Serializable parentId = null;
1361                    String primaryType = null;
1362                    Boolean isProperty = Boolean.FALSE;
1363                    Serializable targetId = null;
1364                    Serializable versionableId = null;
1365                    int i = 1;
1366                    for (Column column : select.whatColumns) {
1367                        String key = column.getKey();
1368                        Serializable value = column.getFromResultSet(rs, i++);
1369                        if (key.equals(Model.MAIN_KEY)) {
1370                            id = value;
1371                        } else if (key.equals(Model.HIER_PARENT_KEY)) {
1372                            parentId = value;
1373                        } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
1374                            primaryType = (String) value;
1375                        } else if (key.equals(Model.PROXY_TARGET_KEY)) {
1376                            targetId = value;
1377                        } else if (key.equals(Model.PROXY_VERSIONABLE_KEY)) {
1378                            versionableId = value;
1379                        }
1380                    }
1381                    children.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
1382                    if (debugValues != null) {
1383                        if (debugValues.size() < DEBUG_MAX_TREE) {
1384                            debugValues.add(id + "/" + primaryType);
1385                        }
1386                    }
1387                }
1388            }
1389            if (debugValues != null) {
1390                if (debugValues.size() >= DEBUG_MAX_TREE) {
1391                    debugValues.add("... (" + children.size() + ") results");
1392                }
1393                logger.log("  -> " + debugValues);
1394            }
1395            return children;
1396        } catch (SQLException e) {
1397            throw new NuxeoException("Failed to get descendants", e);
1398        }
1399    }
1400
1401}