/*
 * (C) Copyright 2006-2016 Nuxeo SA (http://nuxeo.com/) and others.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Contributors:
 *     Florent Guillaume
 */
package org.nuxeo.ecm.core.storage.sql.jdbc;

import java.io.Serializable;
import java.sql.Array;
import java.sql.BatchUpdateException;
import java.sql.CallableStatement;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.stream.Collectors;

import javax.transaction.xa.XAException;
import javax.transaction.xa.Xid;

import org.apache.commons.lang.StringUtils;
import org.nuxeo.ecm.core.api.ConcurrentUpdateException;
import org.nuxeo.ecm.core.api.NuxeoException;
import org.nuxeo.ecm.core.api.model.Delta;
import org.nuxeo.ecm.core.storage.sql.ClusterInvalidator;
import org.nuxeo.ecm.core.storage.sql.Invalidations;
import org.nuxeo.ecm.core.storage.sql.InvalidationsPropagator;
import org.nuxeo.ecm.core.storage.sql.Model;
import org.nuxeo.ecm.core.storage.sql.PropertyType;
import org.nuxeo.ecm.core.storage.sql.Row;
import org.nuxeo.ecm.core.storage.sql.RowId;
import org.nuxeo.ecm.core.storage.sql.RowMapper;
import org.nuxeo.ecm.core.storage.sql.SelectionType;
import org.nuxeo.ecm.core.storage.sql.SimpleFragment;
import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelect;
import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelection;
import org.nuxeo.ecm.core.storage.sql.jdbc.db.Column;
import org.nuxeo.ecm.core.storage.sql.jdbc.db.Table;
import org.nuxeo.ecm.core.storage.sql.jdbc.db.Update;
import org.nuxeo.runtime.api.Framework;
import org.nuxeo.runtime.services.config.ConfigurationService;

/**
 * A {@link JDBCRowMapper} maps {@link Row}s to and from a JDBC database.
 */
public class JDBCRowMapper extends JDBCConnection implements RowMapper {

    public static final int UPDATE_BATCH_SIZE = 100; // also insert/delete

    public static final int DEBUG_MAX_TREE = 50;

    /**
     * Property determining whether a collection append first deletes all the rows and re-inserts them, or is
     * optimized for append.
     */
    public static final String COLLECTION_DELETE_BEFORE_APPEND_PROP = "org.nuxeo.vcs.list-delete-before-append";

    /**
     * Cluster invalidator, or {@code null} if this mapper does not participate in invalidation propagation (cluster
     * invalidator, lock manager).
     */
    private final ClusterInvalidator clusterInvalidator;

    private final InvalidationsPropagator invalidationsPropagator;

    private final boolean collectionDeleteBeforeAppend;

    private final CollectionIO aclCollectionIO;

    private final CollectionIO scalarCollectionIO;

    public JDBCRowMapper(Model model, SQLInfo sqlInfo, ClusterInvalidator clusterInvalidator,
            InvalidationsPropagator invalidationsPropagator) {
        super(model, sqlInfo);
        this.clusterInvalidator = clusterInvalidator;
        this.invalidationsPropagator = invalidationsPropagator;
        ConfigurationService configurationService = Framework.getService(ConfigurationService.class);
        collectionDeleteBeforeAppend = configurationService.isBooleanPropertyTrue(COLLECTION_DELETE_BEFORE_APPEND_PROP);
        aclCollectionIO = new ACLCollectionIO(collectionDeleteBeforeAppend);
        scalarCollectionIO = new ScalarCollectionIO(collectionDeleteBeforeAppend);
    }

    @Override
    public Invalidations receiveInvalidations() {
        if (clusterInvalidator != null) {
            Invalidations invalidations = clusterInvalidator.receiveInvalidations();
            // send received invalidations to all mappers
            if (invalidations != null && !invalidations.isEmpty()) {
                invalidationsPropagator.propagateInvalidations(invalidations, null);
            }
            return invalidations;
        } else {
            return null;
        }
    }

    @Override
    public void sendInvalidations(Invalidations invalidations) {
        if (clusterInvalidator != null) {
            clusterInvalidator.sendInvalidations(invalidations);
        }
    }

    @Override
    public void clearCache() {
        // no cache
    }

    @Override
    public long getCacheSize() {
        return 0;
    }

    @Override
    public void rollback(Xid xid) throws XAException {
        try {
            xaresource.rollback(xid);
        } catch (XAException e) {
            logger.error("XA error on rollback: " + e);
            throw e;
        }
    }

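    /**
     * Gets the {@link CollectionIO} to use for a collection table: the ACL table gets dedicated handling, all other
     * collection tables use plain scalar handling.
     */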
    protected CollectionIO getCollectionIO(String tableName) {
        return tableName.equals(Model.ACL_TABLE_NAME) ? aclCollectionIO : scalarCollectionIO;
    }

    @Override
    public Serializable generateNewId() {
        try {
            return dialect.getGeneratedId(connection);
        } catch (SQLException e) {
            throw new NuxeoException(e);
        }
    }

    /*
     * ----- RowIO -----
     */

    @Override
    public List<? extends RowId> read(Collection<RowId> rowIds, boolean cacheOnly) {
        List<RowId> res = new ArrayList<RowId>(rowIds.size());
        if (cacheOnly) {
            // return no data
            for (RowId rowId : rowIds) {
                res.add(new RowId(rowId));
            }
            return res;
        }
        // reorganize by table
        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
        for (RowId rowId : rowIds) {
            Set<Serializable> ids = tableIds.get(rowId.tableName);
            if (ids == null) {
                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
            }
            ids.add(rowId.id);
        }
        // read on each table
        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
            String tableName = en.getKey();
            Set<Serializable> ids = new HashSet<Serializable>(en.getValue());
            int size = ids.size();
            int chunkSize = sqlInfo.getMaximumArgsForIn();
            List<Row> rows;
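            // too many ids for a single IN clause: read in chunks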
            if (size > chunkSize) {
                List<Serializable> idList = new ArrayList<Serializable>(ids);
                rows = new ArrayList<Row>(size);
                for (int start = 0; start < size; start += chunkSize) {
                    int end = start + chunkSize;
                    if (end > size) {
                        end = size;
                    }
                    // needs to be Serializable -> copy
                    List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
                    List<Row> chunkRows;
                    if (model.isCollectionFragment(tableName)) {
                        chunkRows = readCollectionArrays(tableName, chunkIds);
                    } else {
                        chunkRows = readSimpleRows(tableName, chunkIds);
                    }
                    rows.addAll(chunkRows);
                }
            } else {
                if (model.isCollectionFragment(tableName)) {
                    rows = readCollectionArrays(tableName, ids);
                } else {
                    rows = readSimpleRows(tableName, ids);
                }
            }
            // check we have all the ids (readSimpleRows may have some missing)
            for (Row row : rows) {
                res.add(row);
                ids.remove(row.id);
            }
            // for the missing ids record an empty RowId
            for (Serializable id : ids) {
                res.add(new RowId(tableName, id));
            }
        }
        return res;
    }

    /**
     * Gets a list of rows for {@link SimpleFragment}s from the database, given the table name and the ids.
     *
     * @param tableName the table name
     * @param ids the ids
     * @return the list of rows, without the missing ones
     */
    protected List<Row> readSimpleRows(String tableName, Collection<Serializable> ids) {
        if (ids.isEmpty()) {
            return Collections.emptyList();
        }
        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size());
        Map<String, Serializable> criteriaMap = Collections.singletonMap(Model.MAIN_KEY, (Serializable) ids);
        return getSelectRows(tableName, select, criteriaMap, null, false);
    }

    /**
     * Reads several collection rows, given a table name and the ids.
     *
     * @param tableName the table name
     * @param ids the ids
     */
    protected List<Row> readCollectionArrays(String tableName, Collection<Serializable> ids) {
        if (ids.isEmpty()) {
            return Collections.emptyList();
        }
        String[] orderBys = { Model.MAIN_KEY, Model.COLL_TABLE_POS_KEY }; // clusters results
        Set<String> skipColumns = new HashSet<String>(Arrays.asList(Model.COLL_TABLE_POS_KEY));
        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size(), orderBys, skipColumns);

        String sql = select.sql;
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, ids);
        }
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            int i = 1;
            for (Serializable id : ids) {
                dialect.setId(ps, i++, id);
            }
            try (ResultSet rs = ps.executeQuery()) {
                countExecute();

                // get all values from result set, separate by ids
                // the result set is ordered by id, pos
                CollectionIO io = getCollectionIO(tableName);
                PropertyType ftype = model.getCollectionFragmentType(tableName);
                PropertyType type = ftype.getArrayBaseType();
                Serializable curId = null;
                List<Serializable> list = null;
                Serializable[] returnId = new Serializable[1];
                int[] returnPos = { -1 };
                List<Row> res = new LinkedList<Row>();
                Set<Serializable> remainingIds = new HashSet<Serializable>(ids);
                while (rs.next()) {
                    Serializable value = io.getCurrentFromResultSet(rs, select.whatColumns, model, returnId, returnPos);
                    Serializable newId = returnId[0];
                    if (newId != null && !newId.equals(curId)) {
                        // flush old list
                        if (list != null) {
                            res.add(new Row(tableName, curId, type.collectionToArray(list)));
                            remainingIds.remove(curId);
                        }
                        curId = newId;
                        list = new ArrayList<Serializable>();
                    }
                    list.add(value);
                }
                if (curId != null && list != null) {
                    // flush last list
                    res.add(new Row(tableName, curId, type.collectionToArray(list)));
                    remainingIds.remove(curId);
                }

                // fill empty ones
                if (!remainingIds.isEmpty()) {
                    Serializable[] emptyArray = ftype.getEmptyArray();
                    for (Serializable id : remainingIds) {
                        res.add(new Row(tableName, id, emptyArray));
                    }
                }
                if (logger.isLogEnabled()) {
                    for (Row row : res) {
                        logger.log("  -> " + row);
                    }
                }
                return res;
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not select: " + sql, e);
        }
    }

    /**
     * Fetches the rows for a select with fixed criteria given as two maps (a criteriaMap whose values end up in the
     * returned rows, and a joinMap for other criteria).
     */
    protected List<Row> getSelectRows(String tableName, SQLInfoSelect select, Map<String, Serializable> criteriaMap,
            Map<String, Serializable> joinMap, boolean limitToOne) {
        List<Row> list = new LinkedList<Row>();
        if (select.whatColumns.isEmpty()) {
            // happens when we fetch a fragment whose columns are all opaque
            // check it's a by-id query
            if (select.whereColumns.size() == 1 && select.whereColumns.get(0).getKey() == Model.MAIN_KEY
                    && joinMap == null) {
                Row row = new Row(tableName, criteriaMap);
                if (select.opaqueColumns != null) {
                    for (Column column : select.opaqueColumns) {
                        row.putNew(column.getKey(), Row.OPAQUE);
                    }
                }
                list.add(row);
                return list;
            }
            // else do a useless select but the criteria are more complex and we
            // can't shortcut
        }
        if (joinMap == null) {
            joinMap = Collections.emptyMap();
        }
        try (PreparedStatement ps = connection.prepareStatement(select.sql)) {

            /*
             * Compute where part.
             */
            List<Serializable> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new LinkedList<Serializable>();
            }
            int i = 1;
            for (Column column : select.whereColumns) {
                String key = column.getKey();
                Serializable v;
                if (criteriaMap.containsKey(key)) {
                    v = criteriaMap.get(key);
                } else if (joinMap.containsKey(key)) {
                    v = joinMap.get(key);
                } else {
                    throw new RuntimeException(key);
                }
                if (v == null) {
                    throw new NuxeoException("Null value for key: " + key);
                }
                if (v instanceof Collection<?>) {
                    // allow insert of several values, for the IN (...) case
                    for (Object vv : (Collection<?>) v) {
                        column.setToPreparedStatement(ps, i++, (Serializable) vv);
                        if (debugValues != null) {
                            debugValues.add((Serializable) vv);
                        }
                    }
                } else {
                    column.setToPreparedStatement(ps, i++, v);
                    if (debugValues != null) {
                        debugValues.add(v);
                    }
                }
            }
            if (debugValues != null) {
                logger.logSQL(select.sql, debugValues);
            }

            /*
             * Execute query.
             */
            try (ResultSet rs = ps.executeQuery()) {
                countExecute();

                /*
                 * Construct the maps from the result set.
                 */
                while (rs.next()) {
                    // TODO using criteriaMap is wrong if it contains a Collection
                    Row row = new Row(tableName, criteriaMap);
                    i = 1;
                    for (Column column : select.whatColumns) {
                        row.put(column.getKey(), column.getFromResultSet(rs, i++));
                    }
                    if (select.opaqueColumns != null) {
                        for (Column column : select.opaqueColumns) {
                            row.putNew(column.getKey(), Row.OPAQUE);
                        }
                    }
                    if (logger.isLogEnabled()) {
                        logger.logResultSet(rs, select.whatColumns);
                    }
                    list.add(row);
                    if (limitToOne) {
                        return list;
                    }
                }
            }
            if (limitToOne) {
                return Collections.emptyList();
            }
            return list;
        } catch (SQLException e) {
            checkConcurrentUpdate(e);
            throw new NuxeoException("Could not select: " + select.sql, e);
        }
    }

    @Override
    public void write(RowBatch batch) {
        // do deletes first to avoid violating constraint of unique child name in parent
        // when replacing a complex list element
        if (!batch.deletes.isEmpty()) {
            writeDeletes(batch.deletes);
        }
        // batch.deletesDependent not executed
        if (!batch.creates.isEmpty()) {
            writeCreates(batch.creates);
        }
        if (!batch.updates.isEmpty()) {
            writeUpdates(batch.updates);
        }
    }

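    /**
     * Writes the created rows, reorganized by table; the hierarchy table is written first because other tables have
     * foreign keys to it.
     */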
    protected void writeCreates(List<Row> creates) {
        // reorganize by table
        Map<String, List<Row>> tableRows = new LinkedHashMap<String, List<Row>>();
        // hierarchy table first because there are foreign keys to it
        tableRows.put(Model.HIER_TABLE_NAME, new LinkedList<Row>());
        for (Row row : creates) {
            List<Row> rows = tableRows.get(row.tableName);
            if (rows == null) {
                tableRows.put(row.tableName, rows = new LinkedList<Row>());
            }
            rows.add(row);
        }
        // inserts on each table
        for (Entry<String, List<Row>> en : tableRows.entrySet()) {
            String tableName = en.getKey();
            List<Row> rows = en.getValue();
            if (model.isCollectionFragment(tableName)) {
                List<RowUpdate> rowus = rows.stream().map(RowUpdate::new).collect(Collectors.toList());
                insertCollectionRows(tableName, rowus);
            } else {
                insertSimpleRows(tableName, rows);
            }
        }
    }

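    /**
     * Writes the updated rows, reorganized by table.
     */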
    protected void writeUpdates(Set<RowUpdate> updates) {
        // reorganize by table
        Map<String, List<RowUpdate>> tableRows = new HashMap<String, List<RowUpdate>>();
        for (RowUpdate rowu : updates) {
            List<RowUpdate> rows = tableRows.get(rowu.row.tableName);
            if (rows == null) {
                tableRows.put(rowu.row.tableName, rows = new LinkedList<RowUpdate>());
            }
            rows.add(rowu);
        }
        // updates on each table
        for (Entry<String, List<RowUpdate>> en : tableRows.entrySet()) {
            String tableName = en.getKey();
            List<RowUpdate> rows = en.getValue();
            if (model.isCollectionFragment(tableName)) {
                updateCollectionRows(tableName, rows);
            } else {
                updateSimpleRows(tableName, rows);
            }
        }
    }

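    /**
     * Writes the deleted rows, reorganized by table.
     */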
    protected void writeDeletes(Collection<RowId> deletes) {
        // reorganize by table
        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
        for (RowId rowId : deletes) {
            Set<Serializable> ids = tableIds.get(rowId.tableName);
            if (ids == null) {
                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
            }
            ids.add(rowId.id);
        }
        // delete on each table
        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
            String tableName = en.getKey();
            Set<Serializable> ids = en.getValue();
            deleteRows(tableName, ids);
        }
    }

    /**
     * Inserts multiple rows, all for the same table.
     */
    protected void insertSimpleRows(String tableName, List<Row> rows) {
        if (rows.isEmpty()) {
            return;
        }
        String sql = sqlInfo.getInsertSql(tableName);
        if (sql == null) {
            throw new NuxeoException("Unknown table: " + tableName);
        }
        boolean batched = supportsBatchUpdates && rows.size() > 1;
        String loggedSql = batched ? sql + " -- BATCHED" : sql;
        List<Column> columns = sqlInfo.getInsertColumns(tableName);
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            int batch = 0;
            for (Iterator<Row> rowIt = rows.iterator(); rowIt.hasNext();) {
                Row row = rowIt.next();
                if (logger.isLogEnabled()) {
                    logger.logSQL(loggedSql, columns, row);
                }
                int i = 1;
                for (Column column : columns) {
                    column.setToPreparedStatement(ps, i++, row.get(column.getKey()));
                }
                if (batched) {
                    ps.addBatch();
                    batch++;
                    if (batch % UPDATE_BATCH_SIZE == 0 || !rowIt.hasNext()) {
                        ps.executeBatch();
                        countExecute();
                    }
                } else {
                    ps.execute();
                    countExecute();
                }
            }
        } catch (SQLException e) {
            if (e instanceof BatchUpdateException) {
                BatchUpdateException bue = (BatchUpdateException) e;
                if (e.getCause() == null && bue.getNextException() != null) {
                    // provide a readable cause in the stack trace
                    e.initCause(bue.getNextException());
                }
            }
            checkConcurrentUpdate(e);
            throw new NuxeoException("Could not insert: " + sql, e);
        }
    }

    /**
     * Inserts multiple collection rows, all for the same table.
     */
    protected void insertCollectionRows(String tableName, List<RowUpdate> rowus) {
        if (rowus.isEmpty()) {
            return;
        }
        String sql = sqlInfo.getInsertSql(tableName);
        List<Column> columns = sqlInfo.getInsertColumns(tableName);
        CollectionIO io = getCollectionIO(tableName);
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            io.executeInserts(ps, rowus, columns, supportsBatchUpdates, sql, this);
        } catch (SQLException e) {
            throw new NuxeoException("Could not insert: " + sql, e);
        }
    }

    /**
     * Updates multiple simple rows, all for the same table.
     */
    protected void updateSimpleRows(String tableName, List<RowUpdate> rows) {
        if (rows.isEmpty()) {
            return;
        }

        // reorganize by identical queries to allow batching
        Map<String, SQLInfoSelect> sqlToInfo = new HashMap<>();
        Map<String, List<RowUpdate>> sqlRowUpdates = new HashMap<>();
        for (RowUpdate rowu : rows) {
            SQLInfoSelect update = sqlInfo.getUpdateById(tableName, rowu);
            String sql = update.sql;
            sqlToInfo.put(sql, update);
            sqlRowUpdates.computeIfAbsent(sql, k -> new ArrayList<RowUpdate>()).add(rowu);
        }

        for (Entry<String, List<RowUpdate>> en : sqlRowUpdates.entrySet()) {
            String sql = en.getKey();
            List<RowUpdate> rowUpdates = en.getValue();
            SQLInfoSelect update = sqlToInfo.get(sql);
            boolean changeTokenEnabled = model.getRepositoryDescriptor().isChangeTokenEnabled();
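            // batch only if the driver can report per-statement update counts, or if we don't need them
            // (with change tokens the update count is used to detect concurrent updates)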
            boolean batched = supportsBatchUpdates && rowUpdates.size() > 1
                    && (dialect.supportsBatchUpdateCount() || !changeTokenEnabled);
            String loggedSql = batched ? update.sql + " -- BATCHED" : update.sql;
            try (PreparedStatement ps = connection.prepareStatement(update.sql)) {
                int batch = 0;
                for (Iterator<RowUpdate> rowIt = rowUpdates.iterator(); rowIt.hasNext();) {
                    RowUpdate rowu = rowIt.next();
                    if (logger.isLogEnabled()) {
                        logger.logSQL(loggedSql, update.whatColumns, rowu.row, update.whereColumns, rowu.conditions);
                    }
                    int i = 1;
                    for (Column column : update.whatColumns) {
                        Serializable value = rowu.row.get(column.getKey());
                        if (value instanceof Delta) {
                            value = ((Delta) value).getDeltaValue();
                        }
                        column.setToPreparedStatement(ps, i++, value);
                    }
                    boolean hasConditions = false;
                    for (Column column : update.whereColumns) {
                        // id or condition
                        String key = column.getKey();
                        Serializable value;
                        if (key.equals(Model.MAIN_KEY)) {
                            value = rowu.row.get(key);
                        } else {
                            hasConditions = true;
                            value = rowu.conditions.get(key);
                        }
                        column.setToPreparedStatement(ps, i++, value);
                    }
                    if (batched) {
                        ps.addBatch();
                        batch++;
                        if (batch % UPDATE_BATCH_SIZE == 0 || !rowIt.hasNext()) {
                            int[] counts = ps.executeBatch();
                            countExecute();
                            if (changeTokenEnabled && hasConditions) {
                                for (int j = 0; j < counts.length; j++) {
                                    int count = counts[j];
                                    if (count != Statement.SUCCESS_NO_INFO && count != 1) {
                                        Serializable id = rowUpdates.get(j).row.id;
                                        logger.log("  -> CONCURRENT UPDATE: " + id);
                                        throw new ConcurrentUpdateException(id.toString());
                                    }
                                }
                            }
                        }
                    } else {
                        int count = ps.executeUpdate();
                        countExecute();
                        if (changeTokenEnabled && hasConditions) {
                            if (count != Statement.SUCCESS_NO_INFO && count != 1) {
                                Serializable id = rowu.row.id;
                                logger.log("  -> CONCURRENT UPDATE: " + id);
                                throw new ConcurrentUpdateException(id.toString());
                            }
                        }
                    }
                }
            } catch (SQLException e) {
                checkConcurrentUpdate(e);
                throw new NuxeoException("Could not update: " + update.sql, e);
            }
        }
    }

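    /**
     * Updates multiple collection rows, all for the same table: rows that are not pure appends (or all rows, if
     * delete-before-append is configured) are deleted first, then the values are (re-)inserted.
     */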
    protected void updateCollectionRows(String tableName, List<RowUpdate> rowus) {
        Set<Serializable> deleteIds = new HashSet<>();
        for (RowUpdate rowu : rowus) {
            if (rowu.pos == -1 || collectionDeleteBeforeAppend) {
                deleteIds.add(rowu.row.id);
            }
        }
        deleteRows(tableName, deleteIds);
        insertCollectionRows(tableName, rowus);
    }

    /**
     * Deletes multiple rows, all for the same table.
     */
    protected void deleteRows(String tableName, Set<Serializable> ids) {
        if (ids.isEmpty()) {
            return;
        }
        int size = ids.size();
        int chunkSize = sqlInfo.getMaximumArgsForIn();
        if (size > chunkSize) {
            List<Serializable> idList = new ArrayList<Serializable>(ids);
            for (int start = 0; start < size; start += chunkSize) {
                int end = start + chunkSize;
                if (end > size) {
                    end = size;
                }
                // needs to be Serializable -> copy
                List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
                deleteRowsDirect(tableName, chunkIds);
            }
        } else {
            deleteRowsDirect(tableName, ids);
        }
    }

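    /**
     * Soft-deletes the given nodes: the rows are not removed but flagged as deleted with the current time, in chunks
     * of at most 100 ids.
     */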
    protected void deleteRowsSoft(List<NodeInfo> nodeInfos) {
        try {
            int size = nodeInfos.size();
            List<Serializable> ids = new ArrayList<Serializable>(size);
            for (NodeInfo info : nodeInfos) {
                ids.add(info.id);
            }
            int chunkSize = 100; // max size of ids array
            if (size <= chunkSize) {
                doSoftDeleteRows(ids);
            } else {
                for (int start = 0; start < size;) {
                    int end = start + chunkSize;
                    if (end > size) {
                        end = size;
                    }
                    doSoftDeleteRows(ids.subList(start, end));
                    start = end;
                }
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not soft delete", e);
        }
    }

    // not chunked
    protected void doSoftDeleteRows(List<Serializable> ids) throws SQLException {
        Serializable whereIds = newIdArray(ids);
        Calendar now = Calendar.getInstance();
        String sql = sqlInfo.getSoftDeleteSql();
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Arrays.asList(whereIds, now));
        }
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            setToPreparedStatementIdArray(ps, 1, whereIds);
            dialect.setToPreparedStatementTimestamp(ps, 2, now, null);
            ps.execute();
            countExecute();
            return;
        }
    }

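    /**
     * Builds the ids value used by the soft-delete statement: a native array when the dialect supports SQL arrays,
     * otherwise a single '|'-separated string.
     */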
    protected Serializable newIdArray(Collection<Serializable> ids) {
        if (dialect.supportsArrays()) {
            return ids.toArray(); // Object[]
        } else {
            // join with '|'
            StringBuilder b = new StringBuilder();
            for (Serializable id : ids) {
                b.append(id);
                b.append('|');
            }
            b.setLength(b.length() - 1);
            return b.toString();
        }
    }

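    /**
     * Sets an id array built by {@link #newIdArray} into the prepared statement, either as a string or as a SQL
     * array depending on its type.
     */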
    protected void setToPreparedStatementIdArray(PreparedStatement ps, int index, Serializable idArray)
            throws SQLException {
        if (idArray instanceof String) {
            ps.setString(index, (String) idArray);
        } else {
            Array array = dialect.createArrayOf(Types.OTHER, (Object[]) idArray, connection);
            ps.setArray(index, array);
        }
    }

    /**
     * Cleans up soft-deleted rows.
     * <p>
     * Rows deleted more recently than the beforeTime are left alone. Only a limited number of rows may be deleted, to
     * avoid keeping the transaction open for too long.
     *
     * @param max the maximum number of rows to delete at a time
     * @param beforeTime the maximum deletion time of the rows to delete
     * @return the number of rows deleted
     */
    public int cleanupDeletedRows(int max, Calendar beforeTime) {
        if (max < 0) {
            max = 0;
        }
        String sql = sqlInfo.getSoftDeleteCleanupSql();
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Arrays.<Serializable> asList(beforeTime, Long.valueOf(max)));
        }
        try {
            if (sql.startsWith("{")) {
                // callable statement
                boolean outFirst = sql.startsWith("{?=");
                int outIndex = outFirst ? 1 : 3;
                int inIndex = outFirst ? 2 : 1;
                try (CallableStatement cs = connection.prepareCall(sql)) {
                    cs.setInt(inIndex, max);
                    dialect.setToPreparedStatementTimestamp(cs, inIndex + 1, beforeTime, null);
                    cs.registerOutParameter(outIndex, Types.INTEGER);
                    cs.execute();
                    int count = cs.getInt(outIndex);
                    logger.logCount(count);
                    return count;
                }
            } else {
                // standard prepared statement with result set
                try (PreparedStatement ps = connection.prepareStatement(sql)) {
                    ps.setInt(1, max);
                    dialect.setToPreparedStatementTimestamp(ps, 2, beforeTime, null);
                    try (ResultSet rs = ps.executeQuery()) {
                        countExecute();
                        if (!rs.next()) {
                            throw new NuxeoException("Cannot get result");
                        }
                        int count = rs.getInt(1);
                        logger.logCount(count);
                        return count;
                    }
                }
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not purge soft delete", e);
        }
    }

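    /**
     * Deletes the given rows in a single statement; the caller is responsible for keeping the number of ids below the
     * dialect's maximum number of IN arguments.
     */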
    protected void deleteRowsDirect(String tableName, Collection<Serializable> ids) {
        String sql = sqlInfo.getDeleteSql(tableName, ids.size());
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, ids);
        }
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            int i = 1;
            for (Serializable id : ids) {
                dialect.setId(ps, i++, id);
            }
            int count = ps.executeUpdate();
            countExecute();
            logger.logCount(count);
        } catch (SQLException e) {
            checkConcurrentUpdate(e);
            throw new NuxeoException("Could not delete: " + tableName, e);
        }
    }

    @Override
    public Row readSimpleRow(RowId rowId) {
        SQLInfoSelect select = sqlInfo.selectFragmentById.get(rowId.tableName);
        Map<String, Serializable> criteriaMap = Collections.singletonMap(Model.MAIN_KEY, rowId.id);
        List<Row> maps = getSelectRows(rowId.tableName, select, criteriaMap, null, true);
        return maps.isEmpty() ? null : maps.get(0);
    }

    @Override
    public Map<String, String> getBinaryFulltext(RowId rowId) {
        ArrayList<String> columns = new ArrayList<String>();
        for (String index : model.getFulltextConfiguration().indexesAllBinary) {
            String col = Model.FULLTEXT_BINARYTEXT_KEY + model.getFulltextIndexSuffix(index);
            columns.add(col);
        }
        Serializable id = rowId.id;
        Map<String, String> ret = new HashMap<String, String>(columns.size());
        String sql = dialect.getBinaryFulltextSql(columns);
        if (sql == null) {
            logger.info("getBinaryFulltextSql not supported for dialect " + dialect);
            return ret;
        }
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Collections.singletonList(id));
        }
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            dialect.setId(ps, 1, id);
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    for (int i = 1; i <= columns.size(); i++) {
                        ret.put(columns.get(i - 1), rs.getString(i));
                    }
                }
                if (logger.isLogEnabled()) {
                    logger.log("  -> " + ret);
                }
            }
            return ret;
        } catch (SQLException e) {
            throw new NuxeoException("Could not select: " + sql, e);
        }
    }

    @Override
    public Serializable[] readCollectionRowArray(RowId rowId) {
        String tableName = rowId.tableName;
        Serializable id = rowId.id;
        String sql = sqlInfo.selectFragmentById.get(tableName).sql;
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Collections.singletonList(id));
        }
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            List<Column> columns = sqlInfo.selectFragmentById.get(tableName).whatColumns;
            dialect.setId(ps, 1, id); // assumes only one primary column
            try (ResultSet rs = ps.executeQuery()) {
                countExecute();

                // construct the resulting collection using each row
                CollectionIO io = getCollectionIO(tableName);
                List<Serializable> list = new ArrayList<Serializable>();
                Serializable[] returnId = new Serializable[1];
                int[] returnPos = { -1 };
                while (rs.next()) {
                    list.add(io.getCurrentFromResultSet(rs, columns, model, returnId, returnPos));
                }
                PropertyType type = model.getCollectionFragmentType(tableName).getArrayBaseType();
                Serializable[] array = type.collectionToArray(list);

                if (logger.isLogEnabled()) {
                    logger.log("  -> " + Arrays.asList(array));
                }
                return array;
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not select: " + sql, e);
        }
    }

    @Override
    public List<Row> readSelectionRows(SelectionType selType, Serializable selId, Serializable filter,
            Serializable criterion, boolean limitToOne) {
        SQLInfoSelection selInfo = sqlInfo.getSelection(selType);
        Map<String, Serializable> criteriaMap = new HashMap<String, Serializable>();
        criteriaMap.put(selType.selKey, selId);
        SQLInfoSelect select;
        if (filter == null) {
            select = selInfo.selectAll;
        } else {
            select = selInfo.selectFiltered;
            criteriaMap.put(selType.filterKey, filter);
        }
        if (selType.criterionKey != null) {
            criteriaMap.put(selType.criterionKey, criterion);
        }
        return getSelectRows(selType.tableName, select, criteriaMap, null, limitToOne);
    }

    @Override
    public Set<Serializable> readSelectionsIds(SelectionType selType, List<Serializable> values) {
        SQLInfoSelection selInfo = sqlInfo.getSelection(selType);
        Map<String, Serializable> criteriaMap = new HashMap<String, Serializable>();
        Set<Serializable> ids = new HashSet<>();
        int size = values.size();
        int chunkSize = sqlInfo.getMaximumArgsForIn();
        if (size > chunkSize) {
            for (int start = 0; start < size; start += chunkSize) {
                int end = start + chunkSize;
                if (end > size) {
                    end = size;
                }
                // needs to be Serializable -> copy
                List<Serializable> chunkTodo = new ArrayList<Serializable>(values.subList(start, end));
                criteriaMap.put(selType.selKey, (Serializable) chunkTodo);
                SQLInfoSelect select = selInfo.getSelectSelectionIds(chunkTodo.size());
                List<Row> rows = getSelectRows(selType.tableName, select, criteriaMap, null, false);
                rows.forEach(row -> ids.add(row.id));
            }
        } else {
            criteriaMap.put(selType.selKey, (Serializable) values);
            SQLInfoSelect select = selInfo.getSelectSelectionIds(values.size());
            List<Row> rows = getSelectRows(selType.tableName, select, criteriaMap, null, false);
            rows.forEach(row -> ids.add(row.id));
        }
        return ids;
    }

    @Override
    public CopyResult copy(IdWithTypes source, Serializable destParentId, String destName, Row overwriteRow) {
        // assert !model.separateMainTable; // other case not implemented
        Invalidations invalidations = new Invalidations();
        try {
            Map<Serializable, Serializable> idMap = new LinkedHashMap<Serializable, Serializable>();
            Map<Serializable, IdWithTypes> idToTypes = new HashMap<Serializable, IdWithTypes>();
            // copy the hierarchy fragments recursively
            Serializable overwriteId = overwriteRow == null ? null : overwriteRow.id;
            if (overwriteId != null) {
                // overwrite hier root with explicit values
                String tableName = Model.HIER_TABLE_NAME;
                updateSimpleRowWithValues(tableName, overwriteRow);
                idMap.put(source.id, overwriteId);
                // invalidate
                invalidations.addModified(new RowId(tableName, overwriteId));
            }
            // create the new hierarchy by copy
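            // a regular copy (non-null destParentId) resets version info; a checkin (new version) keeps it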
            boolean resetVersion = destParentId != null;
            Serializable newRootId = copyHierRecursive(source, destParentId, destName, overwriteId, resetVersion,
                    idMap, idToTypes);
            // invalidate children
            Serializable invalParentId = overwriteId == null ? destParentId : overwriteId;
            if (invalParentId != null) { // null for a new version
                invalidations.addModified(new RowId(Invalidations.PARENT, invalParentId));
            }
            // copy all collected fragments
            Set<Serializable> proxyIds = new HashSet<Serializable>();
            for (Entry<String, Set<Serializable>> entry : model.getPerFragmentIds(idToTypes).entrySet()) {
                String tableName = entry.getKey();
                if (tableName.equals(Model.HIER_TABLE_NAME)) {
                    // already done
                    continue;
                }
                if (tableName.equals(Model.VERSION_TABLE_NAME)) {
                    // versions not fileable
                    // restore must not copy versions either
                    continue;
                }
                Set<Serializable> ids = entry.getValue();
                if (tableName.equals(Model.PROXY_TABLE_NAME)) {
                    for (Serializable id : ids) {
                        proxyIds.add(idMap.get(id)); // copied ids
                    }
                }
                Boolean invalidation = copyRows(tableName, ids, idMap, overwriteId);
                if (invalidation != null) {
                    // overwrote something
                    // make sure things are properly invalidated in this and
                    // other sessions
                    if (Boolean.TRUE.equals(invalidation)) {
                        invalidations.addModified(new RowId(tableName, overwriteId));
                    } else {
                        invalidations.addDeleted(new RowId(tableName, overwriteId));
                    }
                }
            }
            return new CopyResult(newRootId, invalidations, proxyIds);
        } catch (SQLException e) {
            throw new NuxeoException("Could not copy: " + source.id.toString(), e);
        }
    }

    /**
     * Updates a row in the database with given explicit values.
     */
    protected void updateSimpleRowWithValues(String tableName, Row row) {
        Update update = sqlInfo.getUpdateByIdForKeys(tableName, row.getKeys());
        Table table = update.getTable();
        String sql = update.getStatement();
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            if (logger.isLogEnabled()) {
                List<Serializable> values = new LinkedList<Serializable>();
                values.addAll(row.getValues());
                values.add(row.id); // id last in SQL
                logger.logSQL(sql, values);
            }
            int i = 1;
            List<String> keys = row.getKeys();
            List<Serializable> values = row.getValues();
            int size = keys.size();
            for (int r = 0; r < size; r++) {
                String key = keys.get(r);
                Serializable value = values.get(r);
                table.getColumn(key).setToPreparedStatement(ps, i++, value);
            }
            dialect.setId(ps, i, row.id); // id last in SQL
            int count = ps.executeUpdate();
            countExecute();
        } catch (SQLException e) {
            throw new NuxeoException("Could not update: " + sql, e);
        }
    }

    /**
     * Copies hierarchy from id to parentId, and recurses.
     * <p>
     * If name is {@code null}, then the original name is kept.
     * <p>
     * {@code idMap} is filled with info about the correspondence between original and copied ids. {@code idToTypes}
     * is filled with the type of each (source) fragment.
     * <p>
     * TODO: this should be optimized to use a stored procedure.
     *
     * @param overwriteId when not {@code null}, the copy is done onto this existing node (skipped)
     * @return the new root id
     */
    protected Serializable copyHierRecursive(IdWithTypes source, Serializable parentId, String name,
            Serializable overwriteId, boolean resetVersion, Map<Serializable, Serializable> idMap,
            Map<Serializable, IdWithTypes> idToTypes) throws SQLException {
        idToTypes.put(source.id, source);
        Serializable newId;
        if (overwriteId == null) {
            newId = copyHier(source.id, parentId, name, resetVersion, idMap);
        } else {
            newId = overwriteId;
            idMap.put(source.id, newId);
        }
        // recurse in children
        boolean onlyComplex = parentId == null;
        for (IdWithTypes child : getChildrenIdsWithTypes(source.id, onlyComplex)) {
            copyHierRecursive(child, newId, null, null, resetVersion, idMap, idToTypes);
        }
        return newId;
    }

    /**
     * Copies hierarchy from id to a new child of parentId.
     * <p>
     * If name is {@code null}, then the original name is kept.
     * <p>
     * {@code idMap} is filled with info about the correspondence between original and copied ids. {@code idToTypes}
     * is filled with the type of each (source) fragment.
     *
     * @return the new id
     */
    protected Serializable copyHier(Serializable id, Serializable parentId, String name, boolean resetVersion,
            Map<Serializable, Serializable> idMap) throws SQLException {
        boolean explicitName = name != null;

        SQLInfoSelect copy = sqlInfo.getCopyHier(explicitName, resetVersion);
        try (PreparedStatement ps = connection.prepareStatement(copy.sql)) {
            Serializable newId = generateNewId();

            List<Serializable> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new ArrayList<Serializable>(4);
            }
            int i = 1;
            for (Column column : copy.whatColumns) {
                String key = column.getKey();
                Serializable v;
                if (key.equals(Model.HIER_PARENT_KEY)) {
                    v = parentId;
                } else if (key.equals(Model.HIER_CHILD_NAME_KEY)) {
                    // present if name explicitly set (first iteration)
                    v = name;
                } else if (key.equals(Model.MAIN_KEY)) {
                    // present if APP_UUID generation
                    v = newId;
                } else if (key.equals(Model.MAIN_BASE_VERSION_KEY) || key.equals(Model.MAIN_CHECKED_IN_KEY)) {
                    v = null;
                } else if (key.equals(Model.MAIN_MINOR_VERSION_KEY) || key.equals(Model.MAIN_MAJOR_VERSION_KEY)) {
                    // present if reset version (regular copy, not checkin)
                    v = null;
                } else {
                    throw new RuntimeException(column.toString());
                }
                column.setToPreparedStatement(ps, i++, v);
                if (debugValues != null) {
                    debugValues.add(v);
                }
            }
            // last parameter is for 'WHERE "id" = ?'
            Column whereColumn = copy.whereColumns.get(0);
            whereColumn.setToPreparedStatement(ps, i, id);
            if (debugValues != null) {
                debugValues.add(id);
                logger.logSQL(copy.sql, debugValues);
            }
            int count = ps.executeUpdate();
            countExecute();

            // TODO DB_IDENTITY
            // post insert fetch idrow

            idMap.put(id, newId);
            return newId;
        }
    }

1165
1166    /**
1167     * Gets the children ids and types of a node.
1168     */
1169    protected List<IdWithTypes> getChildrenIdsWithTypes(Serializable id, boolean onlyComplex) throws SQLException {
1170        List<IdWithTypes> children = new LinkedList<IdWithTypes>();
1171        String sql = sqlInfo.getSelectChildrenIdsAndTypesSql(onlyComplex);
1172        if (logger.isLogEnabled()) {
1173            logger.logSQL(sql, Collections.singletonList(id));
1174        }
1175        List<Column> columns = sqlInfo.getSelectChildrenIdsAndTypesWhatColumns();
1176        try (PreparedStatement ps = connection.prepareStatement(sql)) {
1177            List<String> debugValues = null;
1178            if (logger.isLogEnabled()) {
1179                debugValues = new LinkedList<String>();
1180            }
1181            dialect.setId(ps, 1, id); // parent id
1182            try (ResultSet rs = ps.executeQuery()) {
1183                countExecute();
1184                while (rs.next()) {
1185                    Serializable childId = null;
1186                    String childPrimaryType = null;
1187                    String[] childMixinTypes = null;
1188                    int i = 1;
1189                    for (Column column : columns) {
1190                        String key = column.getKey();
1191                        Serializable value = column.getFromResultSet(rs, i++);
1192                        if (key.equals(Model.MAIN_KEY)) {
1193                            childId = value;
1194                        } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
1195                            childPrimaryType = (String) value;
1196                        } else if (key.equals(Model.MAIN_MIXIN_TYPES_KEY)) {
1197                            childMixinTypes = (String[]) value;
1198                        }
1199                    }
1200                    children.add(new IdWithTypes(childId, childPrimaryType, childMixinTypes));
1201                    if (debugValues != null) {
1202                        debugValues.add(childId + "/" + childPrimaryType + "/" + Arrays.toString(childMixinTypes));
1203                    }
1204                }
1205            }
1206            if (debugValues != null) {
1207                logger.log("  -> " + debugValues);
1208            }
1209            return children;
1210        }
1211    }
1212
    /**
     * Copies the rows from tableName with given ids into new ones with new ids given by idMap.
     * <p>
     * Any existing row with id {@code overwriteId} is first deleted.
     *
     * @return {@link Boolean#TRUE} for a modification or creation, {@link Boolean#FALSE} for a deletion, {@code null}
     *         otherwise (still absent)
     * @throws SQLException
     */
    protected Boolean copyRows(String tableName, Set<Serializable> ids, Map<Serializable, Serializable> idMap,
            Serializable overwriteId) throws SQLException {
        String copySql = sqlInfo.getCopySql(tableName);
        Column copyIdColumn = sqlInfo.getCopyIdColumn(tableName);
        String deleteSql = sqlInfo.getDeleteSql(tableName);
        try (PreparedStatement copyPs = connection.prepareStatement(copySql);
                PreparedStatement deletePs = connection.prepareStatement(deleteSql)) {
            boolean before = false;
            boolean after = false;
            for (Serializable id : ids) {
                Serializable newId = idMap.get(id);
                boolean overwrite = newId.equals(overwriteId);
                if (overwrite) {
                    // remove existing first
                    if (logger.isLogEnabled()) {
                        logger.logSQL(deleteSql, Collections.singletonList(newId));
                    }
                    dialect.setId(deletePs, 1, newId);
                    int delCount = deletePs.executeUpdate();
                    countExecute();
                    before = delCount > 0;
                }
                copyIdColumn.setToPreparedStatement(copyPs, 1, newId);
                copyIdColumn.setToPreparedStatement(copyPs, 2, id);
                if (logger.isLogEnabled()) {
                    logger.logSQL(copySql, Arrays.asList(newId, id));
                }
                int copyCount = copyPs.executeUpdate();
                countExecute();
                if (overwrite) {
                    after = copyCount > 0;
                }
            }
            // * , n -> mod (TRUE)
            // n , 0 -> del (FALSE)
            // 0 , 0 -> null
            return after ? Boolean.TRUE : (before ? Boolean.FALSE : null);
        }
    }

1262    @Override
1263    public void remove(Serializable rootId, List<NodeInfo> nodeInfos) {
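            // soft delete flags the rows for deferred cleanup; the hard delete path removes only the root
            // hierarchy row and leaves the descendants to the database's ON DELETE CASCADE constraints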
1264        if (sqlInfo.softDeleteEnabled) {
1265            deleteRowsSoft(nodeInfos);
1266        } else {
1267            deleteRowsDirect(Model.HIER_TABLE_NAME, Collections.singleton(rootId));
1268        }
1269    }
1270
1271    @Override
1272    public List<NodeInfo> getDescendantsInfo(Serializable rootId) {
1273        if (!dialect.supportsFastDescendants()) {
1274            return getDescendantsInfoIterative(rootId);
1275        }
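            // fast path: a single query returns the whole tree under rootId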
1276        List<NodeInfo> descendants = new LinkedList<NodeInfo>();
1277        String sql = sqlInfo.getSelectDescendantsInfoSql();
1278        if (logger.isLogEnabled()) {
1279            logger.logSQL(sql, Collections.singletonList(rootId));
1280        }
1281        List<Column> columns = sqlInfo.getSelectDescendantsInfoWhatColumns();
1282        try (PreparedStatement ps = connection.prepareStatement(sql)) {
1283            List<String> debugValues = null;
1284            if (logger.isLogEnabled()) {
1285                debugValues = new LinkedList<String>();
1286            }
1287            dialect.setId(ps, 1, rootId); // parent id
1288            try (ResultSet rs = ps.executeQuery()) {
1289                countExecute();
1290                while (rs.next()) {
1291                    Serializable id = null;
1292                    Serializable parentId = null;
1293                    String primaryType = null;
1294                    Boolean isProperty = null;
1295                    Serializable targetId = null;
1296                    Serializable versionableId = null;
1297                    int i = 1;
1298                    for (Column column : columns) {
1299                        String key = column.getKey();
1300                        Serializable value = column.getFromResultSet(rs, i++);
1301                        if (key.equals(Model.MAIN_KEY)) {
1302                            id = value;
1303                        } else if (key.equals(Model.HIER_PARENT_KEY)) {
1304                            parentId = value;
1305                        } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
1306                            primaryType = (String) value;
1307                        } else if (key.equals(Model.HIER_CHILD_ISPROPERTY_KEY)) {
1308                            isProperty = (Boolean) value;
1309                        } else if (key.equals(Model.PROXY_TARGET_KEY)) {
1310                            targetId = value;
1311                        } else if (key.equals(Model.PROXY_VERSIONABLE_KEY)) {
1312                            versionableId = value;
1313                        }
1314                        // no mixins (not useful to caller)
1315                        // no versions (not fileable)
1316                    }
1317                    descendants.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
1318                    if (debugValues != null) {
1319                        if (debugValues.size() < DEBUG_MAX_TREE) {
1320                            debugValues.add(id + "/" + primaryType);
1321                        }
1322                    }
1323                }
1324            }
1325            if (debugValues != null) {
1326                if (debugValues.size() >= DEBUG_MAX_TREE) {
1327                    debugValues.add("... (" + descendants.size() + ") results");
1328                }
1329                logger.log("  -> " + debugValues);
1330            }
1331            return descendants;
1332        } catch (SQLException e) {
1333            throw new NuxeoException("Failed to get descendants", e);
1334        }
1335    }
1336
1337    protected List<NodeInfo> getDescendantsInfoIterative(Serializable rootId) {
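            // breadth-first traversal: fetch the children of the current ids, then descend level by level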
1338        Set<Serializable> done = new HashSet<>();
1339        List<Serializable> todo = new ArrayList<>(Collections.singleton(rootId));
1340        List<NodeInfo> descendants = new ArrayList<NodeInfo>();
1341        while (!todo.isEmpty()) {
1342            List<NodeInfo> infos;
1343            int size = todo.size();
1344            int chunkSize = sqlInfo.getMaximumArgsForIn();
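                // query in chunks so each IN clause stays within the dialect's argument limit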
1345            if (size > chunkSize) {
1346                infos = new ArrayList<>();
1347                for (int start = 0; start < size; start += chunkSize) {
1348                    int end = start + chunkSize;
1349                    if (end > size) {
1350                        end = size;
1351                    }
1352                    // needs to be Serializable -> copy
1353                    List<Serializable> chunkTodo = new ArrayList<Serializable>(todo.subList(start, end));
1354                    List<NodeInfo> chunkInfos = getChildrenNodeInfos(chunkTodo);
1355                    infos.addAll(chunkInfos);
1356                }
1357            } else {
1358                infos = getChildrenNodeInfos(todo);
1359            }
1360            todo = new ArrayList<>();
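                // keep only ids not already processed; they form the next level to visit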
1361            for (NodeInfo info : infos) {
1362                Serializable id = info.id;
1363                if (!done.add(id)) {
1364                    continue;
1365                }
1366                todo.add(id);
1367                descendants.add(info);
1368            }
1369        }
1370        return descendants;
1371    }
1372
1373    /**
1374     * Gets the children of a node as a list of NodeInfo.
1375     */
1376    protected List<NodeInfo> getChildrenNodeInfos(Collection<Serializable> ids) {
1377        List<NodeInfo> children = new LinkedList<NodeInfo>();
1378        SQLInfoSelect select = sqlInfo.getSelectChildrenNodeInfos(ids.size());
1379        if (logger.isLogEnabled()) {
1380            logger.logSQL(select.sql, ids);
1381        }
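            // the single where column is the parent id, bound once per id of the IN clause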
1382        Column where = select.whereColumns.get(0);
1383        try (PreparedStatement ps = connection.prepareStatement(select.sql)) {
1384            List<String> debugValues = null;
1385            if (logger.isLogEnabled()) {
1386                debugValues = new LinkedList<String>();
1387            }
1388            int ii = 1;
1389            for (Serializable id : ids) {
1390                where.setToPreparedStatement(ps, ii++, id);
1391            }
1392            try (ResultSet rs = ps.executeQuery()) {
1394                countExecute();
1395                while (rs.next()) {
1396                    Serializable id = null;
1397                    Serializable parentId = null;
1398                    String primaryType = null;
1399                    Boolean isProperty = Boolean.FALSE;
1400                    Serializable targetId = null;
1401                    Serializable versionableId = null;
1402                    int i = 1;
1403                    for (Column column : select.whatColumns) {
1404                        String key = column.getKey();
1405                        Serializable value = column.getFromResultSet(rs, i++);
1406                        if (key.equals(Model.MAIN_KEY)) {
1407                            id = value;
1408                        } else if (key.equals(Model.HIER_PARENT_KEY)) {
1409                            parentId = value;
1410                        } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
1411                            primaryType = (String) value;
1412                        } else if (key.equals(Model.PROXY_TARGET_KEY)) {
1413                            targetId = value;
1414                        } else if (key.equals(Model.PROXY_VERSIONABLE_KEY)) {
1415                            versionableId = value;
1416                        }
1417                    }
1418                    children.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
1419                    if (debugValues != null) {
1420                        if (debugValues.size() < DEBUG_MAX_TREE) {
1421                            debugValues.add(id + "/" + primaryType);
1422                        }
1423                    }
1424                }
1425            }
1426            if (debugValues != null) {
1427                if (debugValues.size() >= DEBUG_MAX_TREE) {
1428                    debugValues.add("... (" + children.size() + ") results");
1429                }
1430                logger.log("  -> " + debugValues);
1431            }
1432            return children;
1433        } catch (SQLException e) {
1434            throw new NuxeoException("Failed to get descendants", e);
1435        }
1436    }
1437
1438}