/*
 * (C) Copyright 2006-2016 Nuxeo SA (http://nuxeo.com/) and others.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Contributors:
 *     Florent Guillaume
 */
package org.nuxeo.ecm.core.storage.sql.jdbc;

import java.io.Serializable;
import java.sql.Array;
import java.sql.BatchUpdateException;
import java.sql.CallableStatement;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.stream.Collectors;

import javax.transaction.xa.XAException;
import javax.transaction.xa.Xid;

import org.apache.commons.lang.StringUtils;
import org.nuxeo.ecm.core.api.ConcurrentUpdateException;
import org.nuxeo.ecm.core.api.NuxeoException;
import org.nuxeo.ecm.core.api.model.Delta;
import org.nuxeo.ecm.core.storage.sql.ClusterInvalidator;
import org.nuxeo.ecm.core.storage.sql.Invalidations;
import org.nuxeo.ecm.core.storage.sql.InvalidationsPropagator;
import org.nuxeo.ecm.core.storage.sql.Model;
import org.nuxeo.ecm.core.storage.sql.PropertyType;
import org.nuxeo.ecm.core.storage.sql.Row;
import org.nuxeo.ecm.core.storage.sql.RowId;
import org.nuxeo.ecm.core.storage.sql.RowMapper;
import org.nuxeo.ecm.core.storage.sql.SelectionType;
import org.nuxeo.ecm.core.storage.sql.SimpleFragment;
import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelect;
import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelection;
import org.nuxeo.ecm.core.storage.sql.jdbc.db.Column;
import org.nuxeo.ecm.core.storage.sql.jdbc.db.Table;
import org.nuxeo.ecm.core.storage.sql.jdbc.db.Update;
import org.nuxeo.runtime.api.Framework;
import org.nuxeo.runtime.services.config.ConfigurationService;

/**
 * A {@link JDBCRowMapper} maps {@link Row}s to and from a JDBC database.
 */
public class JDBCRowMapper extends JDBCConnection implements RowMapper {

    public static final int UPDATE_BATCH_SIZE = 100; // also insert/delete

    public static final int DEBUG_MAX_TREE = 50;

    /**
     * Property determining whether a collection append deletes all the existing rows and re-inserts them, or is
     * optimized to insert only the appended elements.
     */
    public static final String COLLECTION_DELETE_BEFORE_APPEND_PROP = "org.nuxeo.vcs.list-delete-before-append";
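    // set to true (e.g. in nuxeo.conf, illustrative) to force the delete-then-reinsert behavior on appends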

    /**
     * Cluster invalidator, or {@code null} if this mapper does not participate in invalidation propagation (cluster
     * invalidator, lock manager).
     */
    private final ClusterInvalidator clusterInvalidator;

    private final InvalidationsPropagator invalidationsPropagator;

    private final boolean collectionDeleteBeforeAppend;

    private final CollectionIO aclCollectionIO;

    private final CollectionIO scalarCollectionIO;

    public JDBCRowMapper(Model model, SQLInfo sqlInfo, ClusterInvalidator clusterInvalidator,
            InvalidationsPropagator invalidationsPropagator) {
        super(model, sqlInfo);
        this.clusterInvalidator = clusterInvalidator;
        this.invalidationsPropagator = invalidationsPropagator;
        ConfigurationService configurationService = Framework.getService(ConfigurationService.class);
        collectionDeleteBeforeAppend = configurationService.isBooleanPropertyTrue(COLLECTION_DELETE_BEFORE_APPEND_PROP);
        aclCollectionIO = new ACLCollectionIO(collectionDeleteBeforeAppend);
        scalarCollectionIO = new ScalarCollectionIO(collectionDeleteBeforeAppend);
    }

    @Override
    public Invalidations receiveInvalidations() {
        if (clusterInvalidator != null) {
            Invalidations invalidations = clusterInvalidator.receiveInvalidations();
            // send received invalidations to all mappers
            if (invalidations != null && !invalidations.isEmpty()) {
                invalidationsPropagator.propagateInvalidations(invalidations, null);
            }
            return invalidations;
        } else {
            return null;
        }
    }

    @Override
    public void sendInvalidations(Invalidations invalidations) {
        if (clusterInvalidator != null) {
            clusterInvalidator.sendInvalidations(invalidations);
        }
    }

    @Override
    public void clearCache() {
        // no cache
    }

    @Override
    public long getCacheSize() {
        return 0;
    }

    @Override
    public void rollback(Xid xid) throws XAException {
        try {
            xaresource.rollback(xid);
        } catch (XAException e) {
            logger.error("XA error on rollback: " + e);
            throw e;
        }
    }

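    /** Returns the collection IO to use for a table: ACL-aware for the ACL table, scalar otherwise. */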
    protected CollectionIO getCollectionIO(String tableName) {
        return tableName.equals(Model.ACL_TABLE_NAME) ? aclCollectionIO : scalarCollectionIO;
    }

    @Override
    public Serializable generateNewId() {
        try {
            return dialect.getGeneratedId(connection);
        } catch (SQLException e) {
            throw new NuxeoException(e);
        }
    }

    /*
     * ----- RowIO -----
     */

    @Override
    public List<? extends RowId> read(Collection<RowId> rowIds, boolean cacheOnly) {
        List<RowId> res = new ArrayList<RowId>(rowIds.size());
        if (cacheOnly) {
            // return no data
            for (RowId rowId : rowIds) {
                res.add(new RowId(rowId));
            }
            return res;
        }
        // reorganize by table
        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
        for (RowId rowId : rowIds) {
            Set<Serializable> ids = tableIds.get(rowId.tableName);
            if (ids == null) {
                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
            }
            ids.add(rowId.id);
        }
        // read on each table
        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
            String tableName = en.getKey();
            Set<Serializable> ids = new HashSet<Serializable>(en.getValue());
            int size = ids.size();
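            // the dialect caps how many values can go in an IN clause, so large id sets are read in chunks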
            int chunkSize = sqlInfo.getMaximumArgsForIn();
            List<Row> rows;
            if (size > chunkSize) {
                List<Serializable> idList = new ArrayList<Serializable>(ids);
                rows = new ArrayList<Row>(size);
                for (int start = 0; start < size; start += chunkSize) {
                    int end = start + chunkSize;
                    if (end > size) {
                        end = size;
                    }
                    // needs to be Serializable -> copy
                    List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
                    List<Row> chunkRows;
                    if (model.isCollectionFragment(tableName)) {
                        chunkRows = readCollectionArrays(tableName, chunkIds);
                    } else {
                        chunkRows = readSimpleRows(tableName, chunkIds);
                    }
                    rows.addAll(chunkRows);
                }
            } else {
                if (model.isCollectionFragment(tableName)) {
                    rows = readCollectionArrays(tableName, ids);
                } else {
                    rows = readSimpleRows(tableName, ids);
                }
            }
            // check we have all the ids (readSimpleRows may have some
            // missing)
            for (Row row : rows) {
                res.add(row);
                ids.remove(row.id);
            }
            // for the missing ids record an empty RowId
            for (Serializable id : ids) {
                res.add(new RowId(tableName, id));
            }
        }
        return res;
    }

    /**
     * Gets a list of rows for {@link SimpleFragment}s from the database, given the table name and the ids.
     *
     * @param tableName the table name
     * @param ids the ids
     * @return the list of rows, without the missing ones
     */
    protected List<Row> readSimpleRows(String tableName, Collection<Serializable> ids) {
        if (ids.isEmpty()) {
            return Collections.emptyList();
        }
        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size());
        Map<String, Serializable> criteriaMap = Collections.singletonMap(Model.MAIN_KEY, (Serializable) ids);
        return getSelectRows(tableName, select, criteriaMap, null, false);
    }

    /**
     * Reads several collection rows, given a table name and the ids.
     *
     * @param tableName the table name
     * @param ids the ids
     * @return the list of rows, one per id (missing ids get a row with an empty array)
     */
    protected List<Row> readCollectionArrays(String tableName, Collection<Serializable> ids) {
        if (ids.isEmpty()) {
            return Collections.emptyList();
        }
        String[] orderBys = { Model.MAIN_KEY, Model.COLL_TABLE_POS_KEY }; // clusters results
        Set<String> skipColumns = new HashSet<String>(Arrays.asList(Model.COLL_TABLE_POS_KEY));
        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size(), orderBys, skipColumns);

        String sql = select.sql;
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, ids);
        }
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            int i = 1;
            for (Serializable id : ids) {
                dialect.setId(ps, i++, id);
            }
            try (ResultSet rs = ps.executeQuery()) {
                countExecute();

                // get all values from result set, separate by ids
                // the result set is ordered by id, pos
                CollectionIO io = getCollectionIO(tableName);
                PropertyType ftype = model.getCollectionFragmentType(tableName);
                PropertyType type = ftype.getArrayBaseType();
                Serializable curId = null;
                List<Serializable> list = null;
                Serializable[] returnId = new Serializable[1];
                int[] returnPos = { -1 };
                List<Row> res = new LinkedList<Row>();
                Set<Serializable> remainingIds = new HashSet<Serializable>(ids);
                while (rs.next()) {
                    Serializable value = io.getCurrentFromResultSet(rs, select.whatColumns, model, returnId, returnPos);
                    Serializable newId = returnId[0];
                    if (newId != null && !newId.equals(curId)) {
                        // flush old list
                        if (list != null) {
                            res.add(new Row(tableName, curId, type.collectionToArray(list)));
                            remainingIds.remove(curId);
                        }
                        curId = newId;
                        list = new ArrayList<Serializable>();
                    }
                    list.add(value);
                }
                if (curId != null && list != null) {
                    // flush last list
                    res.add(new Row(tableName, curId, type.collectionToArray(list)));
                    remainingIds.remove(curId);
                }

                // fill empty ones
                if (!remainingIds.isEmpty()) {
                    Serializable[] emptyArray = ftype.getEmptyArray();
                    for (Serializable id : remainingIds) {
                        res.add(new Row(tableName, id, emptyArray));
                    }
                }
                if (logger.isLogEnabled()) {
                    for (Row row : res) {
                        logger.log("  -> " + row);
                    }
                }
                return res;
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not select: " + sql, e);
        }
    }

    /**
     * Fetches the rows for a select with fixed criteria given as two maps (a criteriaMap whose values end up in the
     * returned rows, and a joinMap for other criteria).
     */
    protected List<Row> getSelectRows(String tableName, SQLInfoSelect select, Map<String, Serializable> criteriaMap,
            Map<String, Serializable> joinMap, boolean limitToOne) {
        List<Row> list = new LinkedList<Row>();
        if (select.whatColumns.isEmpty()) {
            // happens when we fetch a fragment whose columns are all opaque
            // check it's a by-id query
            if (select.whereColumns.size() == 1 && select.whereColumns.get(0).getKey() == Model.MAIN_KEY
                    && joinMap == null) {
                Row row = new Row(tableName, criteriaMap);
                if (select.opaqueColumns != null) {
                    for (Column column : select.opaqueColumns) {
                        row.putNew(column.getKey(), Row.OPAQUE);
                    }
                }
                list.add(row);
                return list;
            }
            // else do a useless select but the criteria are more complex and we
            // can't shortcut
        }
        if (joinMap == null) {
            joinMap = Collections.emptyMap();
        }
        try (PreparedStatement ps = connection.prepareStatement(select.sql)) {

            /*
             * Compute where part.
             */
            List<Serializable> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new LinkedList<Serializable>();
            }
            int i = 1;
            for (Column column : select.whereColumns) {
                String key = column.getKey();
                Serializable v;
                if (criteriaMap.containsKey(key)) {
                    v = criteriaMap.get(key);
                } else if (joinMap.containsKey(key)) {
                    v = joinMap.get(key);
                } else {
                    throw new RuntimeException(key);
                }
                if (v == null) {
                    throw new NuxeoException("Null value for key: " + key);
                }
                if (v instanceof Collection<?>) {
                    // allow insert of several values, for the IN (...) case
                    for (Object vv : (Collection<?>) v) {
                        column.setToPreparedStatement(ps, i++, (Serializable) vv);
                        if (debugValues != null) {
                            debugValues.add((Serializable) vv);
                        }
                    }
                } else {
                    column.setToPreparedStatement(ps, i++, v);
                    if (debugValues != null) {
                        debugValues.add(v);
                    }
                }
            }
            if (debugValues != null) {
                logger.logSQL(select.sql, debugValues);
            }

            /*
             * Execute query.
             */
            try (ResultSet rs = ps.executeQuery()) {
                countExecute();

                /*
                 * Construct the maps from the result set.
                 */
                while (rs.next()) {
                    // TODO using criteriaMap is wrong if it contains a Collection
                    Row row = new Row(tableName, criteriaMap);
                    i = 1;
                    for (Column column : select.whatColumns) {
                        row.put(column.getKey(), column.getFromResultSet(rs, i++));
                    }
                    if (select.opaqueColumns != null) {
                        for (Column column : select.opaqueColumns) {
                            row.putNew(column.getKey(), Row.OPAQUE);
                        }
                    }
                    if (logger.isLogEnabled()) {
                        logger.logResultSet(rs, select.whatColumns);
                    }
                    list.add(row);
                    if (limitToOne) {
                        return list;
                    }
                }
            }
            if (limitToOne) {
                return Collections.emptyList();
            }
            return list;
        } catch (SQLException e) {
            checkConcurrentUpdate(e);
            throw new NuxeoException("Could not select: " + select.sql, e);
        }
    }

    @Override
    public void write(RowBatch batch) {
        // do deletes first to avoid violating constraint of unique child name in parent
        // when replacing a complex list element
        if (!batch.deletes.isEmpty()) {
            writeDeletes(batch.deletes);
        }
        // batch.deletesDependent not executed
        if (!batch.creates.isEmpty()) {
            writeCreates(batch.creates);
        }
        if (!batch.updates.isEmpty()) {
            writeUpdates(batch.updates);
        }
    }

    protected void writeCreates(List<Row> creates) {
        // reorganize by table
        Map<String, List<Row>> tableRows = new LinkedHashMap<String, List<Row>>();
        // hierarchy table first because there are foreign keys to it
        tableRows.put(Model.HIER_TABLE_NAME, new LinkedList<Row>());
        for (Row row : creates) {
            List<Row> rows = tableRows.get(row.tableName);
            if (rows == null) {
                tableRows.put(row.tableName, rows = new LinkedList<Row>());
            }
            rows.add(row);
        }
        // inserts on each table
        for (Entry<String, List<Row>> en : tableRows.entrySet()) {
            String tableName = en.getKey();
            List<Row> rows = en.getValue();
            if (model.isCollectionFragment(tableName)) {
                List<RowUpdate> rowus = rows.stream().map(RowUpdate::new).collect(Collectors.toList());
                insertCollectionRows(tableName, rowus);
            } else {
                insertSimpleRows(tableName, rows);
            }
        }
    }

    protected void writeUpdates(Set<RowUpdate> updates) {
        // reorganize by table
        Map<String, List<RowUpdate>> tableRows = new HashMap<String, List<RowUpdate>>();
        for (RowUpdate rowu : updates) {
            List<RowUpdate> rows = tableRows.get(rowu.row.tableName);
            if (rows == null) {
                tableRows.put(rowu.row.tableName, rows = new LinkedList<RowUpdate>());
            }
            rows.add(rowu);
        }
        // updates on each table
        for (Entry<String, List<RowUpdate>> en : tableRows.entrySet()) {
            String tableName = en.getKey();
            List<RowUpdate> rows = en.getValue();
            if (model.isCollectionFragment(tableName)) {
                updateCollectionRows(tableName, rows);
            } else {
                updateSimpleRows(tableName, rows);
            }
        }
    }

    protected void writeDeletes(Collection<RowId> deletes) {
        // reorganize by table
        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
        for (RowId rowId : deletes) {
            Set<Serializable> ids = tableIds.get(rowId.tableName);
            if (ids == null) {
                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
            }
            ids.add(rowId.id);
        }
        // delete on each table
        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
            String tableName = en.getKey();
            Set<Serializable> ids = en.getValue();
            deleteRows(tableName, ids);
        }
    }

    /**
     * Inserts multiple rows, all for the same table.
     */
    protected void insertSimpleRows(String tableName, List<Row> rows) {
        if (rows.isEmpty()) {
            return;
        }
        String sql = sqlInfo.getInsertSql(tableName);
        if (sql == null) {
            throw new NuxeoException("Unknown table: " + tableName);
        }
        boolean batched = supportsBatchUpdates && rows.size() > 1;
        String loggedSql = batched ? sql + " -- BATCHED" : sql;
        List<Column> columns = sqlInfo.getInsertColumns(tableName);
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            int batch = 0;
            for (Iterator<Row> rowIt = rows.iterator(); rowIt.hasNext();) {
                Row row = rowIt.next();
                if (logger.isLogEnabled()) {
                    logger.logSQL(loggedSql, columns, row);
                }
                int i = 1;
                for (Column column : columns) {
                    column.setToPreparedStatement(ps, i++, row.get(column.getKey()));
                }
                if (batched) {
                    ps.addBatch();
                    batch++;
                    if (batch % UPDATE_BATCH_SIZE == 0 || !rowIt.hasNext()) {
                        ps.executeBatch();
                        countExecute();
                    }
                } else {
                    ps.execute();
                    countExecute();
                }
            }
        } catch (SQLException e) {
            if (e instanceof BatchUpdateException) {
                BatchUpdateException bue = (BatchUpdateException) e;
                if (e.getCause() == null && bue.getNextException() != null) {
                    // provide a readable cause in the stack trace
                    e.initCause(bue.getNextException());
                }
            }
            checkConcurrentUpdate(e);
            throw new NuxeoException("Could not insert: " + sql, e);
        }
    }

    /**
     * Inserts multiple collection rows, all for the same table.
     */
    protected void insertCollectionRows(String tableName, List<RowUpdate> rowus) {
        if (rowus.isEmpty()) {
            return;
        }
        String sql = sqlInfo.getInsertSql(tableName);
        List<Column> columns = sqlInfo.getInsertColumns(tableName);
        CollectionIO io = getCollectionIO(tableName);
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            io.executeInserts(ps, rowus, columns, supportsBatchUpdates, sql, this);
        } catch (SQLException e) {
            checkConcurrentUpdate(e);
            throw new NuxeoException("Could not insert: " + sql, e);
        }
    }

    /**
     * Updates multiple simple rows, all for the same table.
     */
    protected void updateSimpleRows(String tableName, List<RowUpdate> rows) {
        if (rows.isEmpty()) {
            return;
        }

        // reorganize by identical queries to allow batching
        Map<String, SQLInfoSelect> sqlToInfo = new HashMap<>();
        Map<String, List<RowUpdate>> sqlRowUpdates = new HashMap<>();
        for (RowUpdate rowu : rows) {
            SQLInfoSelect update = sqlInfo.getUpdateById(tableName, rowu);
            String sql = update.sql;
            sqlToInfo.put(sql, update);
            sqlRowUpdates.computeIfAbsent(sql, k -> new ArrayList<RowUpdate>()).add(rowu);
        }

        for (Entry<String, List<RowUpdate>> en : sqlRowUpdates.entrySet()) {
            String sql = en.getKey();
            List<RowUpdate> rowUpdates = en.getValue();
            SQLInfoSelect update = sqlToInfo.get(sql);
            boolean changeTokenEnabled = model.getRepositoryDescriptor().isChangeTokenEnabled();
            boolean batched = supportsBatchUpdates && rowUpdates.size() > 1
                    && (dialect.supportsBatchUpdateCount() || !changeTokenEnabled);
            String loggedSql = batched ? update.sql + " -- BATCHED" : update.sql;
            try (PreparedStatement ps = connection.prepareStatement(update.sql)) {
                int batch = 0;
                for (Iterator<RowUpdate> rowIt = rowUpdates.iterator(); rowIt.hasNext();) {
                    RowUpdate rowu = rowIt.next();
                    if (logger.isLogEnabled()) {
                        logger.logSQL(loggedSql, update.whatColumns, rowu.row, update.whereColumns, rowu.conditions);
                    }
                    int i = 1;
                    for (Column column : update.whatColumns) {
                        Serializable value = rowu.row.get(column.getKey());
                        if (value instanceof Delta) {
                            value = ((Delta) value).getDeltaValue();
                        }
                        column.setToPreparedStatement(ps, i++, value);
                    }
                    boolean hasConditions = false;
                    for (Column column : update.whereColumns) {
                        // id or condition
                        String key = column.getKey();
                        Serializable value;
                        if (key.equals(Model.MAIN_KEY)) {
                            value = rowu.row.get(key);
                        } else {
                            hasConditions = true;
                            value = rowu.conditions.get(key);
                        }
                        column.setToPreparedStatement(ps, i++, value);
                    }
                    if (batched) {
                        ps.addBatch();
                        batch++;
                        if (batch % UPDATE_BATCH_SIZE == 0 || !rowIt.hasNext()) {
                            int[] counts = ps.executeBatch();
                            countExecute();
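                            // an update count != 1 with conditions means the WHERE clause (id + change token)
                            // matched no row, i.e. someone else modified the row concurrently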
                            if (changeTokenEnabled && hasConditions) {
                                for (int j = 0; j < counts.length; j++) {
                                    int count = counts[j];
                                    if (count != Statement.SUCCESS_NO_INFO && count != 1) {
                                        Serializable id = rowUpdates.get(j).row.id;
                                        logger.log("  -> CONCURRENT UPDATE: " + id);
                                        throw new ConcurrentUpdateException(id.toString());
                                    }
                                }
                            }
                        }
                    } else {
                        int count = ps.executeUpdate();
                        countExecute();
                        if (changeTokenEnabled && hasConditions) {
                            if (count != Statement.SUCCESS_NO_INFO && count != 1) {
                                Serializable id = rowu.row.id;
                                logger.log("  -> CONCURRENT UPDATE: " + id);
                                throw new ConcurrentUpdateException(id.toString());
                            }
                        }
                    }
                }
            } catch (SQLException e) {
                checkConcurrentUpdate(e);
                throw new NuxeoException("Could not update: " + update.sql, e);
            }
        }
    }

    protected void updateCollectionRows(String tableName, List<RowUpdate> rowus) {
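        // pos == -1 means the whole collection is rewritten (delete then re-insert); otherwise this is an append,
        // unless the delete-before-append compatibility property forces a full rewrite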
        Set<Serializable> deleteIds = new HashSet<>();
        for (RowUpdate rowu : rowus) {
            if (rowu.pos == -1 || collectionDeleteBeforeAppend) {
                deleteIds.add(rowu.row.id);
            }
        }
        deleteRows(tableName, deleteIds);
        insertCollectionRows(tableName, rowus);
    }

    /**
     * Deletes multiple rows, all for the same table.
     */
    protected void deleteRows(String tableName, Set<Serializable> ids) {
        if (ids.isEmpty()) {
            return;
        }
        int size = ids.size();
        int chunkSize = sqlInfo.getMaximumArgsForIn();
        if (size > chunkSize) {
            List<Serializable> idList = new ArrayList<Serializable>(ids);
            for (int start = 0; start < size; start += chunkSize) {
                int end = start + chunkSize;
                if (end > size) {
                    end = size;
                }
                // needs to be Serializable -> copy
                List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
                deleteRowsDirect(tableName, chunkIds);
            }
        } else {
            deleteRowsDirect(tableName, ids);
        }
    }

    protected void deleteRowsSoft(List<NodeInfo> nodeInfos) {
        try {
            int size = nodeInfos.size();
            List<Serializable> ids = new ArrayList<Serializable>(size);
            for (NodeInfo info : nodeInfos) {
                ids.add(info.id);
            }
            int chunkSize = 100; // max size of ids array
            if (size <= chunkSize) {
                doSoftDeleteRows(ids);
            } else {
                for (int start = 0; start < size;) {
                    int end = start + chunkSize;
                    if (end > size) {
                        end = size;
                    }
                    doSoftDeleteRows(ids.subList(start, end));
                    start = end;
                }
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not soft delete", e);
        }
    }

    // not chunked
    protected void doSoftDeleteRows(List<Serializable> ids) throws SQLException {
        Serializable whereIds = newIdArray(ids);
        Calendar now = Calendar.getInstance();
        String sql = sqlInfo.getSoftDeleteSql();
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Arrays.asList(whereIds, now));
        }
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            setToPreparedStatementIdArray(ps, 1, whereIds);
            dialect.setToPreparedStatementTimestamp(ps, 2, now, null);
            ps.execute();
            countExecute();
            return;
        }
    }

    protected Serializable newIdArray(Collection<Serializable> ids) {
        if (dialect.supportsArrays()) {
            return ids.toArray(); // Object[]
        } else {
            // join with '|'
            StringBuilder b = new StringBuilder();
            for (Serializable id : ids) {
                b.append(id);
                b.append('|');
            }
            b.setLength(b.length() - 1);
            return b.toString();
        }
    }

    protected void setToPreparedStatementIdArray(PreparedStatement ps, int index, Serializable idArray)
            throws SQLException {
        if (idArray instanceof String) {
            ps.setString(index, (String) idArray);
        } else {
            Array array = dialect.createArrayOf(Types.OTHER, (Object[]) idArray, connection);
            ps.setArray(index, array);
        }
    }

    /**
     * Cleans up soft-deleted rows.
     * <p>
     * Rows deleted more recently than the beforeTime are left alone. Only a limited number of rows may be deleted, to
     * prevent the transaction from lasting too long.
     *
     * @param max the maximum number of rows to delete at a time
     * @param beforeTime the maximum deletion time of the rows to delete
     * @return the number of rows deleted
     */
    public int cleanupDeletedRows(int max, Calendar beforeTime) {
        if (max < 0) {
            max = 0;
        }
        String sql = sqlInfo.getSoftDeleteCleanupSql();
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Arrays.<Serializable> asList(beforeTime, Long.valueOf(max)));
        }
        try {
            if (sql.startsWith("{")) {
                // callable statement
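                // the dialect's cleanup procedure returns the count either as a leading OUT parameter
                // ("{?= call ...}") or as the third parameter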
                boolean outFirst = sql.startsWith("{?=");
                int outIndex = outFirst ? 1 : 3;
                int inIndex = outFirst ? 2 : 1;
                try (CallableStatement cs = connection.prepareCall(sql)) {
                    cs.setInt(inIndex, max);
                    dialect.setToPreparedStatementTimestamp(cs, inIndex + 1, beforeTime, null);
                    cs.registerOutParameter(outIndex, Types.INTEGER);
                    cs.execute();
                    int count = cs.getInt(outIndex);
                    logger.logCount(count);
                    return count;
                }
            } else {
                // standard prepared statement with result set
                try (PreparedStatement ps = connection.prepareStatement(sql)) {
                    ps.setInt(1, max);
                    dialect.setToPreparedStatementTimestamp(ps, 2, beforeTime, null);
                    try (ResultSet rs = ps.executeQuery()) {
                        countExecute();
                        if (!rs.next()) {
                            throw new NuxeoException("Cannot get result");
                        }
                        int count = rs.getInt(1);
                        logger.logCount(count);
                        return count;
                    }
                }
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not purge soft delete", e);
        }
    }

    protected void deleteRowsDirect(String tableName, Collection<Serializable> ids) {
        String sql = sqlInfo.getDeleteSql(tableName, ids.size());
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, ids);
        }
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            int i = 1;
            for (Serializable id : ids) {
                dialect.setId(ps, i++, id);
            }
            int count = ps.executeUpdate();
            countExecute();
            logger.logCount(count);
        } catch (SQLException e) {
            checkConcurrentUpdate(e);
            throw new NuxeoException("Could not delete: " + tableName, e);
        }
    }

    @Override
    public Row readSimpleRow(RowId rowId) {
        SQLInfoSelect select = sqlInfo.selectFragmentById.get(rowId.tableName);
        Map<String, Serializable> criteriaMap = Collections.singletonMap(Model.MAIN_KEY, rowId.id);
        List<Row> maps = getSelectRows(rowId.tableName, select, criteriaMap, null, true);
        return maps.isEmpty() ? null : maps.get(0);
    }

    @Override
    public Map<String, String> getBinaryFulltext(RowId rowId) {
        ArrayList<String> columns = new ArrayList<String>();
        for (String index : model.getFulltextConfiguration().indexesAllBinary) {
            String col = Model.FULLTEXT_BINARYTEXT_KEY + model.getFulltextIndexSuffix(index);
            columns.add(col);
        }
        Serializable id = rowId.id;
        Map<String, String> ret = new HashMap<String, String>(columns.size());
        String sql = dialect.getBinaryFulltextSql(columns);
        if (sql == null) {
            logger.info("getBinaryFulltextSql not supported for dialect " + dialect);
            return ret;
        }
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Collections.singletonList(id));
        }
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            dialect.setId(ps, 1, id);
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    for (int i = 1; i <= columns.size(); i++) {
                        ret.put(columns.get(i - 1), rs.getString(i));
                    }
                }
                if (logger.isLogEnabled()) {
                    logger.log("  -> " + ret);
                }
            }
            return ret;
        } catch (SQLException e) {
            throw new NuxeoException("Could not select: " + sql, e);
        }
    }

    @Override
    public Serializable[] readCollectionRowArray(RowId rowId) {
        String tableName = rowId.tableName;
        Serializable id = rowId.id;
        String sql = sqlInfo.selectFragmentById.get(tableName).sql;
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Collections.singletonList(id));
        }
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            List<Column> columns = sqlInfo.selectFragmentById.get(tableName).whatColumns;
            dialect.setId(ps, 1, id); // assumes only one primary column
            try (ResultSet rs = ps.executeQuery()) {
                countExecute();

                // construct the resulting collection using each row
                CollectionIO io = getCollectionIO(tableName);
                List<Serializable> list = new ArrayList<Serializable>();
                Serializable[] returnId = new Serializable[1];
                int[] returnPos = { -1 };
                while (rs.next()) {
                    list.add(io.getCurrentFromResultSet(rs, columns, model, returnId, returnPos));
                }
                PropertyType type = model.getCollectionFragmentType(tableName).getArrayBaseType();
                Serializable[] array = type.collectionToArray(list);

                if (logger.isLogEnabled()) {
                    logger.log("  -> " + Arrays.asList(array));
                }
                return array;
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not select: " + sql, e);
        }
    }

    @Override
    public List<Row> readSelectionRows(SelectionType selType, Serializable selId, Serializable filter,
            Serializable criterion, boolean limitToOne) {
        SQLInfoSelection selInfo = sqlInfo.getSelection(selType);
        Map<String, Serializable> criteriaMap = new HashMap<String, Serializable>();
        criteriaMap.put(selType.selKey, selId);
        SQLInfoSelect select;
        if (filter == null) {
            select = selInfo.selectAll;
        } else {
            select = selInfo.selectFiltered;
            criteriaMap.put(selType.filterKey, filter);
        }
        if (selType.criterionKey != null) {
            criteriaMap.put(selType.criterionKey, criterion);
        }
        return getSelectRows(selType.tableName, select, criteriaMap, null, limitToOne);
    }

    @Override
    public Set<Serializable> readSelectionsIds(SelectionType selType, List<Serializable> values) {
        SQLInfoSelection selInfo = sqlInfo.getSelection(selType);
        Map<String, Serializable> criteriaMap = new HashMap<String, Serializable>();
        Set<Serializable> ids = new HashSet<>();
        int size = values.size();
        int chunkSize = sqlInfo.getMaximumArgsForIn();
        if (size > chunkSize) {
            for (int start = 0; start < size; start += chunkSize) {
                int end = start + chunkSize;
                if (end > size) {
                    end = size;
                }
                // needs to be Serializable -> copy
                List<Serializable> chunkTodo = new ArrayList<Serializable>(values.subList(start, end));
                criteriaMap.put(selType.selKey, (Serializable) chunkTodo);
                SQLInfoSelect select = selInfo.getSelectSelectionIds(chunkTodo.size());
                List<Row> rows = getSelectRows(selType.tableName, select, criteriaMap, null, false);
                rows.forEach(row -> ids.add(row.id));
            }
        } else {
            criteriaMap.put(selType.selKey, (Serializable) values);
            SQLInfoSelect select = selInfo.getSelectSelectionIds(values.size());
            List<Row> rows = getSelectRows(selType.tableName, select, criteriaMap, null, false);
            rows.forEach(row -> ids.add(row.id));
        }
        return ids;
    }

    @Override
    public CopyResult copy(IdWithTypes source, Serializable destParentId, String destName, Row overwriteRow) {
        // assert !model.separateMainTable; // other case not implemented
        Invalidations invalidations = new Invalidations();
        try {
            Map<Serializable, Serializable> idMap = new LinkedHashMap<Serializable, Serializable>();
            Map<Serializable, IdWithTypes> idToTypes = new HashMap<Serializable, IdWithTypes>();
            // copy the hierarchy fragments recursively
            Serializable overwriteId = overwriteRow == null ? null : overwriteRow.id;
            if (overwriteId != null) {
                // overwrite hier root with explicit values
                String tableName = Model.HIER_TABLE_NAME;
                updateSimpleRowWithValues(tableName, overwriteRow);
                idMap.put(source.id, overwriteId);
                // invalidate
                invalidations.addModified(new RowId(tableName, overwriteId));
            }
            // create the new hierarchy by copy
            boolean resetVersion = destParentId != null;
            Serializable newRootId = copyHierRecursive(source, destParentId, destName, overwriteId, resetVersion,
                    idMap, idToTypes);
            // invalidate children
            Serializable invalParentId = overwriteId == null ? destParentId : overwriteId;
            if (invalParentId != null) { // null for a new version
                invalidations.addModified(new RowId(Invalidations.PARENT, invalParentId));
            }
            // copy all collected fragments
            Set<Serializable> proxyIds = new HashSet<Serializable>();
            for (Entry<String, Set<Serializable>> entry : model.getPerFragmentIds(idToTypes).entrySet()) {
                String tableName = entry.getKey();
                if (tableName.equals(Model.HIER_TABLE_NAME)) {
                    // already done
                    continue;
                }
                if (tableName.equals(Model.VERSION_TABLE_NAME)) {
                    // versions not fileable
                    // restore must not copy versions either
                    continue;
                }
                Set<Serializable> ids = entry.getValue();
                if (tableName.equals(Model.PROXY_TABLE_NAME)) {
                    for (Serializable id : ids) {
                        proxyIds.add(idMap.get(id)); // copied ids
                    }
                }
                Boolean invalidation = copyRows(tableName, ids, idMap, overwriteId);
                if (invalidation != null) {
                    // overwrote something
                    // make sure things are properly invalidated in this and
                    // other sessions
                    if (Boolean.TRUE.equals(invalidation)) {
                        invalidations.addModified(new RowId(tableName, overwriteId));
                    } else {
                        invalidations.addDeleted(new RowId(tableName, overwriteId));
                    }
                }
            }
            return new CopyResult(newRootId, invalidations, proxyIds);
        } catch (SQLException e) {
            throw new NuxeoException("Could not copy: " + source.id.toString(), e);
        }
    }

    /**
     * Updates a row in the database with given explicit values.
     */
    protected void updateSimpleRowWithValues(String tableName, Row row) {
        Update update = sqlInfo.getUpdateByIdForKeys(tableName, row.getKeys());
        Table table = update.getTable();
        String sql = update.getStatement();
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            if (logger.isLogEnabled()) {
                List<Serializable> values = new LinkedList<Serializable>();
                values.addAll(row.getValues());
                values.add(row.id); // id last in SQL
                logger.logSQL(sql, values);
            }
            int i = 1;
            List<String> keys = row.getKeys();
            List<Serializable> values = row.getValues();
            int size = keys.size();
            for (int r = 0; r < size; r++) {
                String key = keys.get(r);
                Serializable value = values.get(r);
                table.getColumn(key).setToPreparedStatement(ps, i++, value);
            }
            dialect.setId(ps, i, row.id); // id last in SQL
            int count = ps.executeUpdate();
            countExecute();
        } catch (SQLException e) {
            throw new NuxeoException("Could not update: " + sql, e);
        }
    }

    /**
     * Copies hierarchy from id to parentId, and recurses.
     * <p>
     * If name is {@code null}, then the original name is kept.
     * <p>
     * {@code idMap} is filled with info about the correspondence between original and copied ids. {@code idType} is
     * filled with the type of each (source) fragment.
     * <p>
     * TODO: this should be optimized to use a stored procedure.
     *
     * @param overwriteId when not {@code null}, the copy is done onto this existing node (skipped)
     * @return the new root id
     */
    protected Serializable copyHierRecursive(IdWithTypes source, Serializable parentId, String name,
            Serializable overwriteId, boolean resetVersion, Map<Serializable, Serializable> idMap,
            Map<Serializable, IdWithTypes> idToTypes) throws SQLException {
        idToTypes.put(source.id, source);
        Serializable newId;
        if (overwriteId == null) {
            newId = copyHier(source.id, parentId, name, resetVersion, idMap);
        } else {
            newId = overwriteId;
            idMap.put(source.id, newId);
        }
        // recurse in children
        boolean onlyComplex = parentId == null;
        for (IdWithTypes child : getChildrenIdsWithTypes(source.id, onlyComplex)) {
            copyHierRecursive(child, newId, null, null, resetVersion, idMap, idToTypes);
        }
        return newId;
    }

    /**
     * Copies hierarchy from id to a new child of parentId.
     * <p>
     * If name is {@code null}, then the original name is kept.
     * <p>
     * {@code idMap} is filled with info about the correspondence between original and copied ids. {@code idType} is
     * filled with the type of each (source) fragment.
     *
     * @return the new id
     */
    protected Serializable copyHier(Serializable id, Serializable parentId, String name, boolean resetVersion,
            Map<Serializable, Serializable> idMap) throws SQLException {
        boolean explicitName = name != null;

        SQLInfoSelect copy = sqlInfo.getCopyHier(explicitName, resetVersion);
        try (PreparedStatement ps = connection.prepareStatement(copy.sql)) {
            Serializable newId = generateNewId();

            List<Serializable> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new ArrayList<Serializable>(4);
            }
            int i = 1;
            for (Column column : copy.whatColumns) {
                String key = column.getKey();
                Serializable v;
                if (key.equals(Model.HIER_PARENT_KEY)) {
                    v = parentId;
                } else if (key.equals(Model.HIER_CHILD_NAME_KEY)) {
                    // present if name explicitly set (first iteration)
                    v = name;
                } else if (key.equals(Model.MAIN_KEY)) {
                    // present if APP_UUID generation
                    v = newId;
                } else if (key.equals(Model.MAIN_BASE_VERSION_KEY) || key.equals(Model.MAIN_CHECKED_IN_KEY)) {
                    v = null;
                } else if (key.equals(Model.MAIN_MINOR_VERSION_KEY) || key.equals(Model.MAIN_MAJOR_VERSION_KEY)) {
                    // present if reset version (regular copy, not checkin)
                    v = null;
                } else {
                    throw new RuntimeException(column.toString());
                }
                column.setToPreparedStatement(ps, i++, v);
                if (debugValues != null) {
                    debugValues.add(v);
                }
            }
            // last parameter is for 'WHERE "id" = ?'
            Column whereColumn = copy.whereColumns.get(0);
            whereColumn.setToPreparedStatement(ps, i, id);
            if (debugValues != null) {
                debugValues.add(id);
                logger.logSQL(copy.sql, debugValues);
            }
            int count = ps.executeUpdate();
            countExecute();

            // TODO DB_IDENTITY
            // post insert fetch idrow

            idMap.put(id, newId);
            return newId;
        }
    }

    /**
     * Gets the children ids and types of a node.
     */
    protected List<IdWithTypes> getChildrenIdsWithTypes(Serializable id, boolean onlyComplex) throws SQLException {
        List<IdWithTypes> children = new LinkedList<IdWithTypes>();
        String sql = sqlInfo.getSelectChildrenIdsAndTypesSql(onlyComplex);
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Collections.singletonList(id));
        }
        List<Column> columns = sqlInfo.getSelectChildrenIdsAndTypesWhatColumns();
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            List<String> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new LinkedList<String>();
            }
            dialect.setId(ps, 1, id); // parent id
            try (ResultSet rs = ps.executeQuery()) {
                countExecute();
                while (rs.next()) {
                    Serializable childId = null;
                    String childPrimaryType = null;
                    String[] childMixinTypes = null;
                    int i = 1;
                    for (Column column : columns) {
                        String key = column.getKey();
                        Serializable value = column.getFromResultSet(rs, i++);
                        if (key.equals(Model.MAIN_KEY)) {
                            childId = value;
                        } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
                            childPrimaryType = (String) value;
                        } else if (key.equals(Model.MAIN_MIXIN_TYPES_KEY)) {
                            childMixinTypes = (String[]) value;
                        }
                    }
                    children.add(new IdWithTypes(childId, childPrimaryType, childMixinTypes));
                    if (debugValues != null) {
                        debugValues.add(childId + "/" + childPrimaryType + "/" + Arrays.toString(childMixinTypes));
                    }
                }
            }
            if (debugValues != null) {
                logger.log("  -> " + debugValues);
            }
            return children;
        }
    }

    /**
     * Copies the rows from tableName with given ids into new ones with new ids given by idMap.
     * <p>
     * Any existing row with id {@code overwriteId} is first deleted.
     *
     * @return {@link Boolean#TRUE} for a modification or creation, {@link Boolean#FALSE} for a deletion, {@code null}
     *         otherwise (still absent)
     * @throws SQLException
     */
1223    protected Boolean copyRows(String tableName, Set<Serializable> ids, Map<Serializable, Serializable> idMap,
1224            Serializable overwriteId) throws SQLException {
1225        String copySql = sqlInfo.getCopySql(tableName);
1226        Column copyIdColumn = sqlInfo.getCopyIdColumn(tableName);
1227        String deleteSql = sqlInfo.getDeleteSql(tableName);
1228        try (PreparedStatement copyPs = connection.prepareStatement(copySql);
1229                PreparedStatement deletePs = connection.prepareStatement(deleteSql)) {
1230            boolean before = false;
1231            boolean after = false;
1232            for (Serializable id : ids) {
1233                Serializable newId = idMap.get(id);
1234                boolean overwrite = newId.equals(overwriteId);
1235                if (overwrite) {
1236                    // remove existing first
1237                    if (logger.isLogEnabled()) {
1238                        logger.logSQL(deleteSql, Collections.singletonList(newId));
1239                    }
1240                    dialect.setId(deletePs, 1, newId);
1241                    int delCount = deletePs.executeUpdate();
1242                    countExecute();
1243                    before = delCount > 0;
1244                }
1245                copyIdColumn.setToPreparedStatement(copyPs, 1, newId);
1246                copyIdColumn.setToPreparedStatement(copyPs, 2, id);
1247                if (logger.isLogEnabled()) {
1248                    logger.logSQL(copySql, Arrays.asList(newId, id));
1249                }
1250                int copyCount = copyPs.executeUpdate();
1251                countExecute();
1252                if (overwrite) {
1253                    after = copyCount > 0;
1254                }
1255            }
1256            // * , n -> mod (TRUE)
1257            // n , 0 -> del (FALSE)
1258            // 0 , 0 -> null
1259            return after ? Boolean.TRUE : (before ? Boolean.FALSE : null);
1260        }
1261    }
1262
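    /**
     * When soft delete is enabled, the given {@code nodeInfos} are soft-deleted; otherwise the row for
     * {@code rootId} is deleted directly from the hierarchy table.
     */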
1263    @Override
1264    public void remove(Serializable rootId, List<NodeInfo> nodeInfos) {
1265        if (sqlInfo.softDeleteEnabled) {
1266            deleteRowsSoft(nodeInfos);
1267        } else {
1268            deleteRowsDirect(Model.HIER_TABLE_NAME, Collections.singleton(rootId));
1269        }
1270    }
1271
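    /**
     * Fetches all descendants of {@code rootId} in a single query when the dialect supports it, otherwise falls
     * back to the iterative, level-by-level implementation.
     */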
1272    @Override
1273    public List<NodeInfo> getDescendantsInfo(Serializable rootId) {
1274        if (!dialect.supportsFastDescendants()) {
1275            return getDescendantsInfoIterative(rootId);
1276        }
1277        List<NodeInfo> descendants = new LinkedList<NodeInfo>();
1278        String sql = sqlInfo.getSelectDescendantsInfoSql();
1279        if (logger.isLogEnabled()) {
1280            logger.logSQL(sql, Collections.singletonList(rootId));
1281        }
1282        List<Column> columns = sqlInfo.getSelectDescendantsInfoWhatColumns();
1283        try (PreparedStatement ps = connection.prepareStatement(sql)) {
1284            List<String> debugValues = null;
1285            if (logger.isLogEnabled()) {
1286                debugValues = new LinkedList<String>();
1287            }
1288            dialect.setId(ps, 1, rootId); // parent id
1289            try (ResultSet rs = ps.executeQuery()) {
1290                countExecute();
1291                while (rs.next()) {
1292                    NodeInfo info = getNodeInfo(rs, columns);
1293                    descendants.add(info);
1294                    if (debugValues != null) {
1295                        if (debugValues.size() < DEBUG_MAX_TREE) {
1296                            debugValues.add(info.id + "/" + info.primaryType);
1297                        }
1298                    }
1299                }
1300            }
1301            if (debugValues != null) {
1302                if (debugValues.size() >= DEBUG_MAX_TREE) {
1303                    debugValues.add("... (" + descendants.size() + ") results");
1304                }
1305                logger.log("  -> " + debugValues);
1306            }
1307            return descendants;
1308        } catch (SQLException e) {
1309            throw new NuxeoException("Failed to get descendants", e);
1310        }
1311    }
1312
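    /**
     * Iterative implementation of {@link #getDescendantsInfo}, used when the dialect cannot fetch all descendants
     * in one query: children are fetched level by level, in chunks small enough for the dialect's IN clause limit.
     */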
1313    protected List<NodeInfo> getDescendantsInfoIterative(Serializable rootId) {
1314        Set<Serializable> done = new HashSet<>();
1315        List<Serializable> todo = new ArrayList<>(Collections.singleton(rootId));
1316        List<NodeInfo> descendants = new ArrayList<NodeInfo>();
1317        while (!todo.isEmpty()) {
1318            List<NodeInfo> infos;
1319            int size = todo.size();
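            // query children in chunks, to stay under the dialect's maximum number of IN clause arguments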
1320            int chunkSize = sqlInfo.getMaximumArgsForIn();
1321            if (size > chunkSize) {
1322                infos = new ArrayList<>();
1323                for (int start = 0; start < size; start += chunkSize) {
1324                    int end = start + chunkSize;
1325                    if (end > size) {
1326                        end = size;
1327                    }
1328                    // copy needed: the subList view is not Serializable
1329                    List<Serializable> chunkTodo = new ArrayList<Serializable>(todo.subList(start, end));
1330                    List<NodeInfo> chunkInfos = getChildrenNodeInfos(chunkTodo);
1331                    infos.addAll(chunkInfos);
1332                }
1333            } else {
1334                infos = getChildrenNodeInfos(todo);
1335            }
1336            todo = new ArrayList<>();
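            // collect the next level of ids to visit, skipping any node already processed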
1337            for (NodeInfo info : infos) {
1338                Serializable id = info.id;
1339                if (!done.add(id)) {
1340                    continue;
1341                }
1342                todo.add(id);
1343                descendants.add(info);
1344            }
1345        }
1346        return descendants;
1347    }
1348
1349    /**
1350     * Gets the children of the given nodes as a list of {@link NodeInfo}.
1351     */
1352    protected List<NodeInfo> getChildrenNodeInfos(Collection<Serializable> ids) {
1353        List<NodeInfo> children = new LinkedList<NodeInfo>();
1354        SQLInfoSelect select = sqlInfo.getSelectChildrenNodeInfos(ids.size());
1355        if (logger.isLogEnabled()) {
1356            logger.logSQL(select.sql, ids);
1357        }
1358        Column where = select.whereColumns.get(0);
1359        try (PreparedStatement ps = connection.prepareStatement(select.sql)) {
1360            List<String> debugValues = null;
1361            if (logger.isLogEnabled()) {
1362                debugValues = new LinkedList<String>();
1363            }
1364            int ii = 1;
1365            for (Serializable id : ids) {
1366                where.setToPreparedStatement(ps, ii++, id);
1367            }
1368            try (ResultSet rs = ps.executeQuery()) {
1370                countExecute();
1371                while (rs.next()) {
1372                    NodeInfo info = getNodeInfo(rs, select.whatColumns);
1373                    children.add(info);
1374                    if (debugValues != null) {
1375                        if (debugValues.size() < DEBUG_MAX_TREE) {
1376                            debugValues.add(info.id + "/" + info.primaryType);
1377                        }
1378                    }
1379                }
1380            }
1381            if (debugValues != null) {
1382                if (debugValues.size() >= DEBUG_MAX_TREE) {
1383                    debugValues.add("... (" + children.size() + ") results");
1384                }
1385                logger.log("  -> " + debugValues);
1386            }
1387            return children;
1388        } catch (SQLException e) {
1389            throw new NuxeoException("Failed to get children node infos", e);
1390        }
1391    }
1392
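    /**
     * Builds a {@link NodeInfo} from the current row of the result set, using the given columns to decode the
     * values.
     */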
1393    protected NodeInfo getNodeInfo(ResultSet rs, List<Column> columns) throws SQLException {
1394        Serializable id = null;
1395        Serializable parentId = null;
1396        String primaryType = null;
1397        Boolean isProperty = null;
1398        Serializable targetId = null;
1399        Serializable versionableId = null;
1400        boolean isRetentionActive = false;
1401        int i = 1;
1402        for (Column column : columns) {
1403            String key = column.getKey();
1404            Serializable value = column.getFromResultSet(rs, i++);
1405            if (key.equals(Model.MAIN_KEY)) {
1406                id = value;
1407            } else if (key.equals(Model.HIER_PARENT_KEY)) {
1408                parentId = value;
1409            } else if (key.equals(Model.MAIN_PRIMARY_TYPE_KEY)) {
1410                primaryType = (String) value;
1411            } else if (key.equals(Model.HIER_CHILD_ISPROPERTY_KEY)) {
1412                isProperty = (Boolean) value;
1413            } else if (key.equals(Model.PROXY_TARGET_KEY)) {
1414                targetId = value;
1415            } else if (key.equals(Model.PROXY_VERSIONABLE_KEY)) {
1416                versionableId = value;
1417            } else if (key.equals(Model.MAIN_IS_RETENTION_ACTIVE_KEY)) {
1418                isRetentionActive = Boolean.TRUE.equals(value);
1419            }
1420            // no mixins (not useful to caller)
1421            // no versions (not fileable)
1422        }
1423        NodeInfo nodeInfo = new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId,
1424                isRetentionActive);
1425        return nodeInfo;
1426    }
1427
1428}