/*
 * (C) Copyright 2006-2011 Nuxeo SA (http://nuxeo.com/) and others.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Contributors:
 *     Florent Guillaume
 */
package org.nuxeo.ecm.core.storage.sql.jdbc;

import java.io.Serializable;
import java.sql.Array;
import java.sql.BatchUpdateException;
import java.sql.CallableStatement;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.stream.Collectors;

import javax.sql.XADataSource;
import javax.transaction.xa.XAException;
import javax.transaction.xa.Xid;

import org.apache.commons.lang.StringUtils;
import org.nuxeo.ecm.core.api.NuxeoException;
import org.nuxeo.ecm.core.api.model.Delta;
import org.nuxeo.ecm.core.query.QueryFilter;
import org.nuxeo.ecm.core.storage.sql.ClusterInvalidator;
import org.nuxeo.ecm.core.storage.sql.Invalidations;
import org.nuxeo.ecm.core.storage.sql.InvalidationsPropagator;
import org.nuxeo.ecm.core.storage.sql.InvalidationsQueue;
import org.nuxeo.ecm.core.storage.sql.Mapper;
import org.nuxeo.ecm.core.storage.sql.Model;
import org.nuxeo.ecm.core.storage.sql.PropertyType;
import org.nuxeo.ecm.core.storage.sql.Row;
import org.nuxeo.ecm.core.storage.sql.RowId;
import org.nuxeo.ecm.core.storage.sql.RowMapper;
import org.nuxeo.ecm.core.storage.sql.RowMapper.NodeInfo;
import org.nuxeo.ecm.core.storage.sql.SelectionType;
import org.nuxeo.ecm.core.storage.sql.SimpleFragment;
import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelect;
import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelection;
import org.nuxeo.ecm.core.storage.sql.jdbc.db.Column;
import org.nuxeo.ecm.core.storage.sql.jdbc.db.Table;
import org.nuxeo.ecm.core.storage.sql.jdbc.db.Update;
import org.nuxeo.runtime.api.Framework;
import org.nuxeo.runtime.services.config.ConfigurationService;

/**
 * A {@link JDBCRowMapper} maps {@link Row}s to and from a JDBC database.
 */
public class JDBCRowMapper extends JDBCConnection implements RowMapper {

    public static final int UPDATE_BATCH_SIZE = 100; // also insert/delete

    public static final int DEBUG_MAX_TREE = 50;

    /**
     * Property to determine whether collection appends delete all the existing rows and re-insert them, or are
     * optimized for append.
     */
    public static final String COLLECTION_DELETE_BEFORE_APPEND_PROP = "org.nuxeo.vcs.list-delete-before-append";
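
    /*
     * For reference, a minimal sketch (mirroring the constructor below) of how this flag is read through the
     * configuration service; the property name is the constant above:
     *
     *     ConfigurationService cs = Framework.getService(ConfigurationService.class);
     *     boolean deleteBeforeAppend = cs.isBooleanPropertyTrue(COLLECTION_DELETE_BEFORE_APPEND_PROP);
     */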

    /**
     * Cluster invalidator, or {@code null} if this mapper does not participate in invalidation propagation (cluster
     * invalidator, lock manager).
     */
    private final ClusterInvalidator clusterInvalidator;

    private final InvalidationsPropagator invalidationsPropagator;

    private final boolean collectionDeleteBeforeAppend;

    private final CollectionIO aclCollectionIO;

    private final CollectionIO scalarCollectionIO;

    public JDBCRowMapper(Model model, SQLInfo sqlInfo, XADataSource xadatasource, ClusterInvalidator clusterInvalidator,
            InvalidationsPropagator invalidationsPropagator, boolean noSharing) {
        super(model, sqlInfo, xadatasource, noSharing);
        this.clusterInvalidator = clusterInvalidator;
        this.invalidationsPropagator = invalidationsPropagator;
        ConfigurationService configurationService = Framework.getService(ConfigurationService.class);
        collectionDeleteBeforeAppend = configurationService.isBooleanPropertyTrue(COLLECTION_DELETE_BEFORE_APPEND_PROP);
        aclCollectionIO = new ACLCollectionIO(collectionDeleteBeforeAppend);
        scalarCollectionIO = new ScalarCollectionIO(collectionDeleteBeforeAppend);
    }

    @Override
    public Invalidations receiveInvalidations() {
        if (clusterInvalidator != null) {
            Invalidations invalidations = clusterInvalidator.receiveInvalidations();
            // send received invalidations to all mappers
            if (invalidations != null && !invalidations.isEmpty()) {
                invalidationsPropagator.propagateInvalidations(invalidations, null);
            }
            return invalidations;
        } else {
            return null;
        }
    }

    @Override
    public void sendInvalidations(Invalidations invalidations) {
        if (clusterInvalidator != null) {
            clusterInvalidator.sendInvalidations(invalidations);
        }
    }

    @Override
    public void clearCache() {
        // no cache
    }

    @Override
    public long getCacheSize() {
        return 0;
    }

    @Override
    public void rollback(Xid xid) throws XAException {
        try {
            xaresource.rollback(xid);
        } catch (XAException e) {
            logger.error("XA error on rollback: " + e);
            throw e;
        }
    }

    protected CollectionIO getCollectionIO(String tableName) {
        return tableName.equals(model.ACL_TABLE_NAME) ? aclCollectionIO : scalarCollectionIO;
    }

    @Override
    public Serializable generateNewId() {
        try {
            return dialect.getGeneratedId(connection);
        } catch (SQLException e) {
            throw new NuxeoException(e);
        }
    }

    /*
     * ----- RowIO -----
     */

    @Override
    public List<? extends RowId> read(Collection<RowId> rowIds, boolean cacheOnly) {
        List<RowId> res = new ArrayList<RowId>(rowIds.size());
        if (cacheOnly) {
            // return no data
            for (RowId rowId : rowIds) {
                res.add(new RowId(rowId));
            }
            return res;
        }
        // reorganize by table
        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
        for (RowId rowId : rowIds) {
            Set<Serializable> ids = tableIds.get(rowId.tableName);
            if (ids == null) {
                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
            }
            ids.add(rowId.id);
        }
        // read on each table
        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
            String tableName = en.getKey();
            Set<Serializable> ids = new HashSet<Serializable>(en.getValue());
            int size = ids.size();
            int chunkSize = sqlInfo.getMaximumArgsForIn();
            List<Row> rows;
            if (size > chunkSize) {
                List<Serializable> idList = new ArrayList<Serializable>(ids);
                rows = new ArrayList<Row>(size);
                for (int start = 0; start < size; start += chunkSize) {
                    int end = start + chunkSize;
                    if (end > size) {
                        end = size;
                    }
                    // needs to be Serializable -> copy
                    List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
                    List<Row> chunkRows;
                    if (model.isCollectionFragment(tableName)) {
                        chunkRows = readCollectionArrays(tableName, chunkIds);
                    } else {
                        chunkRows = readSimpleRows(tableName, chunkIds);
                    }
                    rows.addAll(chunkRows);
                }
            } else {
                if (model.isCollectionFragment(tableName)) {
                    rows = readCollectionArrays(tableName, ids);
                } else {
                    rows = readSimpleRows(tableName, ids);
                }
            }
            // check we have all the ids (readSimpleRows may have some missing)
            for (Row row : rows) {
                res.add(row);
                ids.remove(row.id);
            }
            // for the missing ids record an empty RowId
            for (Serializable id : ids) {
                res.add(new RowId(tableName, id));
            }
        }
        return res;
    }
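
    /*
     * Illustrative sketch of the chunking done in read() above: the ids are split into slices no larger than
     * sqlInfo.getMaximumArgsForIn() so that the generated "IN (?, ?, ...)" clause stays within the database's
     * parameter limits (the values below are hypothetical):
     *
     *     int chunkSize = 3;
     *     List<Serializable> idList = new ArrayList<>(Arrays.asList("a", "b", "c", "d", "e"));
     *     for (int start = 0; start < idList.size(); start += chunkSize) {
     *         int end = Math.min(start + chunkSize, idList.size());
     *         List<Serializable> chunk = new ArrayList<>(idList.subList(start, end));
     *         // first pass -> ["a", "b", "c"], second pass -> ["d", "e"]
     *     }
     */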

    /**
     * Gets a list of rows for {@link SimpleFragment}s from the database, given the table name and the ids.
     *
     * @param tableName the table name
     * @param ids the ids
     * @return the list of rows, without the missing ones
     */
    protected List<Row> readSimpleRows(String tableName, Collection<Serializable> ids) {
        if (ids.isEmpty()) {
            return Collections.emptyList();
        }
        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size());
        Map<String, Serializable> criteriaMap = Collections.singletonMap(model.MAIN_KEY, (Serializable) ids);
        return getSelectRows(tableName, select, criteriaMap, null, false);
    }

    /**
     * Reads several collection rows, given a table name and the ids.
     *
     * @param tableName the table name
     * @param ids the ids
     */
    protected List<Row> readCollectionArrays(String tableName, Collection<Serializable> ids) {
        if (ids.isEmpty()) {
            return Collections.emptyList();
        }
        String[] orderBys = { model.MAIN_KEY, model.COLL_TABLE_POS_KEY }; // clusters results
        Set<String> skipColumns = new HashSet<String>(Arrays.asList(model.COLL_TABLE_POS_KEY));
        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size(), orderBys, skipColumns);

        String sql = select.sql;
        try {
            if (logger.isLogEnabled()) {
                logger.logSQL(sql, ids);
            }
            PreparedStatement ps = connection.prepareStatement(sql);
            ResultSet rs = null;
            try {
                int i = 1;
                for (Serializable id : ids) {
                    dialect.setId(ps, i++, id);
                }
                rs = ps.executeQuery();
                countExecute();

                // get all values from result set, separate by ids
                // the result set is ordered by id, pos
                CollectionIO io = getCollectionIO(tableName);
                PropertyType ftype = model.getCollectionFragmentType(tableName);
                PropertyType type = ftype.getArrayBaseType();
                Serializable curId = null;
                List<Serializable> list = null;
                Serializable[] returnId = new Serializable[1];
                int[] returnPos = { -1 };
                List<Row> res = new LinkedList<Row>();
                Set<Serializable> remainingIds = new HashSet<Serializable>(ids);
                while (rs.next()) {
                    Serializable value = io.getCurrentFromResultSet(rs, select.whatColumns, model, returnId, returnPos);
                    Serializable newId = returnId[0];
                    if (newId != null && !newId.equals(curId)) {
                        // flush old list
                        if (list != null) {
                            res.add(new Row(tableName, curId, type.collectionToArray(list)));
                            remainingIds.remove(curId);
                        }
                        curId = newId;
                        list = new ArrayList<Serializable>();
                    }
                    list.add(value);
                }
                if (curId != null && list != null) {
                    // flush last list
                    res.add(new Row(tableName, curId, type.collectionToArray(list)));
                    remainingIds.remove(curId);
                }

                // fill empty ones
                if (!remainingIds.isEmpty()) {
                    Serializable[] emptyArray = ftype.getEmptyArray();
                    for (Serializable id : remainingIds) {
                        res.add(new Row(tableName, id, emptyArray));
                    }
                }
                if (logger.isLogEnabled()) {
                    for (Row row : res) {
                        logger.log("  -> " + row);
                    }
                }
                return res;
            } finally {
                closeStatement(ps, rs);
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not select: " + sql, e);
        }
    }
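
    /*
     * Shape of the fold performed in readCollectionArrays() above, on illustrative data: the result set is ordered
     * by (id, pos), so consecutive rows sharing the same id are accumulated into a single array-valued Row, and ids
     * that return no rows at all get the empty array of the fragment type:
     *
     *     (id1, 0, "a"), (id1, 1, "b"), (id2, 0, "x")
     *     -> Row(tableName, id1, ["a", "b"]), Row(tableName, id2, ["x"])
     */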

    /**
     * Fetches the rows for a select with fixed criteria given as two maps (a criteriaMap whose values end up in the
     * returned rows, and a joinMap for other criteria).
     */
    protected List<Row> getSelectRows(String tableName, SQLInfoSelect select, Map<String, Serializable> criteriaMap,
            Map<String, Serializable> joinMap, boolean limitToOne) {
        List<Row> list = new LinkedList<Row>();
        if (select.whatColumns.isEmpty()) {
            // happens when we fetch a fragment whose columns are all opaque
            // check it's a by-id query
            if (select.whereColumns.size() == 1 && select.whereColumns.get(0).getKey() == model.MAIN_KEY
                    && joinMap == null) {
                Row row = new Row(tableName, criteriaMap);
                if (select.opaqueColumns != null) {
                    for (Column column : select.opaqueColumns) {
                        row.putNew(column.getKey(), Row.OPAQUE);
                    }
                }
                list.add(row);
                return list;
            }
            // else do a useless select but the criteria are more complex and we
            // can't shortcut
        }
        if (joinMap == null) {
            joinMap = Collections.emptyMap();
        }
        PreparedStatement ps = null;
        ResultSet rs = null;
        try {
            ps = connection.prepareStatement(select.sql);

            /*
             * Compute where part.
             */
            List<Serializable> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new LinkedList<Serializable>();
            }
            int i = 1;
            for (Column column : select.whereColumns) {
                String key = column.getKey();
                Serializable v;
                if (criteriaMap.containsKey(key)) {
                    v = criteriaMap.get(key);
                } else if (joinMap.containsKey(key)) {
                    v = joinMap.get(key);
                } else {
                    throw new RuntimeException(key);
                }
                if (v == null) {
                    throw new NuxeoException("Null value for key: " + key);
                }
                if (v instanceof Collection<?>) {
                    // allow insert of several values, for the IN (...) case
                    for (Object vv : (Collection<?>) v) {
                        column.setToPreparedStatement(ps, i++, (Serializable) vv);
                        if (debugValues != null) {
                            debugValues.add((Serializable) vv);
                        }
                    }
                } else {
                    column.setToPreparedStatement(ps, i++, v);
                    if (debugValues != null) {
                        debugValues.add(v);
                    }
                }
            }
            if (debugValues != null) {
                logger.logSQL(select.sql, debugValues);
            }

            /*
             * Execute query.
             */
            rs = ps.executeQuery();
            countExecute();

            /*
             * Construct the maps from the result set.
             */
            while (rs.next()) {
                Row row = new Row(tableName, criteriaMap);
                i = 1;
                for (Column column : select.whatColumns) {
                    row.put(column.getKey(), column.getFromResultSet(rs, i++));
                }
                if (select.opaqueColumns != null) {
                    for (Column column : select.opaqueColumns) {
                        row.putNew(column.getKey(), Row.OPAQUE);
                    }
                }
                if (logger.isLogEnabled()) {
                    logger.logResultSet(rs, select.whatColumns);
                }
                list.add(row);
                if (limitToOne) {
                    return list;
                }
            }
            if (limitToOne) {
                return Collections.emptyList();
            }
            return list;
        } catch (SQLException e) {
            checkConcurrentUpdate(e);
            throw new NuxeoException("Could not select: " + select.sql, e);
        } finally {
            try {
                closeStatement(ps, rs);
            } catch (SQLException e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

    @Override
    public void write(RowBatch batch) {
        if (!batch.creates.isEmpty()) {
            writeCreates(batch.creates);
        }
        if (!batch.updates.isEmpty()) {
            writeUpdates(batch.updates);
        }
        if (!batch.deletes.isEmpty()) {
            writeDeletes(batch.deletes);
        }
        // batch.deletesDependent not executed
    }

    protected void writeCreates(List<Row> creates) {
        // reorganize by table
        Map<String, List<Row>> tableRows = new LinkedHashMap<String, List<Row>>();
        // hierarchy table first because there are foreign keys to it
        tableRows.put(model.HIER_TABLE_NAME, new LinkedList<Row>());
        for (Row row : creates) {
            List<Row> rows = tableRows.get(row.tableName);
            if (rows == null) {
                tableRows.put(row.tableName, rows = new LinkedList<Row>());
            }
            rows.add(row);
        }
        // inserts on each table
        for (Entry<String, List<Row>> en : tableRows.entrySet()) {
            String tableName = en.getKey();
            List<Row> rows = en.getValue();
            if (model.isCollectionFragment(tableName)) {
                List<RowUpdate> rowus = rows.stream().map(RowUpdate::new).collect(Collectors.toList());
                insertCollectionRows(tableName, rowus);
            } else {
                insertSimpleRows(tableName, rows);
            }
        }
    }

    protected void writeUpdates(Set<RowUpdate> updates) {
        // reorganize by table
        Map<String, List<RowUpdate>> tableRows = new HashMap<String, List<RowUpdate>>();
        for (RowUpdate rowu : updates) {
            List<RowUpdate> rows = tableRows.get(rowu.row.tableName);
            if (rows == null) {
                tableRows.put(rowu.row.tableName, rows = new LinkedList<RowUpdate>());
            }
            rows.add(rowu);
        }
        // updates on each table
        for (Entry<String, List<RowUpdate>> en : tableRows.entrySet()) {
            String tableName = en.getKey();
            List<RowUpdate> rows = en.getValue();
            if (model.isCollectionFragment(tableName)) {
                updateCollectionRows(tableName, rows);
            } else {
                updateSimpleRows(tableName, rows);
            }
        }
    }

    protected void writeDeletes(Collection<RowId> deletes) {
        // reorganize by table
        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
        for (RowId rowId : deletes) {
            Set<Serializable> ids = tableIds.get(rowId.tableName);
            if (ids == null) {
                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
            }
            ids.add(rowId.id);
        }
        // delete on each table
        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
            String tableName = en.getKey();
            Set<Serializable> ids = en.getValue();
            deleteRows(tableName, ids);
        }
    }

    /**
     * Inserts multiple rows, all for the same table.
     */
    protected void insertSimpleRows(String tableName, List<Row> rows) {
        if (rows.isEmpty()) {
            return;
        }
        String sql = sqlInfo.getInsertSql(tableName);
        if (sql == null) {
            throw new NuxeoException("Unknown table: " + tableName);
        }
        String loggedSql = supportsBatchUpdates && rows.size() > 1 ? sql + " -- BATCHED" : sql;
        List<Column> columns = sqlInfo.getInsertColumns(tableName);
        try {
            PreparedStatement ps = connection.prepareStatement(sql);
            try {
                int batch = 0;
                for (Row row : rows) {
                    batch++;
                    if (logger.isLogEnabled()) {
                        logger.logSQL(loggedSql, columns, row);
                    }
                    int i = 1;
                    for (Column column : columns) {
                        column.setToPreparedStatement(ps, i++, row.get(column.getKey()));
                    }
                    if (supportsBatchUpdates) {
                        ps.addBatch();
                        if (batch % UPDATE_BATCH_SIZE == 0) {
                            ps.executeBatch();
                            countExecute();
                        }
                    } else {
                        ps.execute();
                        countExecute();
                    }
                }
                if (supportsBatchUpdates) {
                    ps.executeBatch();
                    countExecute();
                }
            } finally {
                closeStatement(ps);
            }
        } catch (SQLException e) {
            if (e instanceof BatchUpdateException) {
                BatchUpdateException bue = (BatchUpdateException) e;
                if (e.getCause() == null && bue.getNextException() != null) {
                    // provide a readable cause in the stack trace
                    e.initCause(bue.getNextException());
                }
            }
            checkConcurrentUpdate(e);
            throw new NuxeoException("Could not insert: " + sql, e);
        }
    }
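
    /*
     * The JDBC batching pattern used above, reduced to a plain-JDBC sketch (hypothetical table and column, with a
     * literal 100 standing in for UPDATE_BATCH_SIZE):
     *
     *     try (PreparedStatement ps = connection.prepareStatement("INSERT INTO demo (id) VALUES (?)")) {
     *         int batch = 0;
     *         for (String id : ids) {
     *             ps.setString(1, id);
     *             ps.addBatch();
     *             if (++batch % 100 == 0) {
     *                 ps.executeBatch(); // flush a full batch
     *             }
     *         }
     *         ps.executeBatch(); // flush the remainder
     *     }
     */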

    /**
     * Inserts multiple collection rows, all for the same table.
     */
    protected void insertCollectionRows(String tableName, List<RowUpdate> rowus) {
        if (rowus.isEmpty()) {
            return;
        }
        String sql = sqlInfo.getInsertSql(tableName);
        List<Column> columns = sqlInfo.getInsertColumns(tableName);
        CollectionIO io = getCollectionIO(tableName);
        try {
            PreparedStatement ps = connection.prepareStatement(sql);
            try {
                io.executeInserts(ps, rowus, columns, supportsBatchUpdates, sql, this);
            } finally {
                closeStatement(ps);
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not insert: " + sql, e);
        }
    }

    /**
     * Updates multiple simple rows, all for the same table.
     */
    protected void updateSimpleRows(String tableName, List<RowUpdate> rows) {
        if (rows.isEmpty()) {
            return;
        }

        // reorganize by unique sets of keys + which ones are for delta updates
        Map<String, List<RowUpdate>> updatesByCanonKeys = new HashMap<>();
        Map<String, Collection<String>> keysByCanonKeys = new HashMap<>();
        Map<String, Set<String>> deltasByCanonKeys = new HashMap<>();
        for (RowUpdate rowu : rows) {
            List<String> keys = new ArrayList<String>(rowu.keys);
            if (keys.isEmpty()) {
                continue;
            }
            Set<String> deltas = new HashSet<>();
            for (ListIterator<String> it = keys.listIterator(); it.hasNext();) {
                String key = it.next();
                Serializable value = rowu.row.get(key);
                if (value instanceof Delta && ((Delta) value).getBase() != null) {
                    deltas.add(key);
                    it.set(key + '+');
                }
            }
            Collections.sort(keys);
            String ck = StringUtils.join(keys, ','); // canonical keys
            List<RowUpdate> keysUpdates = updatesByCanonKeys.get(ck);
            if (keysUpdates == null) {
                updatesByCanonKeys.put(ck, keysUpdates = new LinkedList<RowUpdate>());
                keysByCanonKeys.put(ck, rowu.keys);
                deltasByCanonKeys.put(ck, deltas);
            }
            keysUpdates.add(rowu);
        }

        for (String ck : updatesByCanonKeys.keySet()) {
            List<RowUpdate> keysUpdates = updatesByCanonKeys.get(ck);
            Collection<String> keys = keysByCanonKeys.get(ck);
            Set<String> deltas = deltasByCanonKeys.get(ck);
            SQLInfoSelect update = sqlInfo.getUpdateById(tableName, keys, deltas);
            String loggedSql = supportsBatchUpdates && rows.size() > 1 ? update.sql + " -- BATCHED" : update.sql;
            try {
                PreparedStatement ps = connection.prepareStatement(update.sql);
                int batch = 0;
                try {
                    for (RowUpdate rowu : keysUpdates) {
                        batch++;
                        if (logger.isLogEnabled()) {
                            logger.logSQL(loggedSql, update.whatColumns, rowu.row, deltas);
                        }
                        int i = 1;
                        for (Column column : update.whatColumns) {
                            Serializable value = rowu.row.get(column.getKey());
                            if (value instanceof Delta) {
                                value = ((Delta) value).getDeltaValue();
                            }
                            column.setToPreparedStatement(ps, i++, value);
                        }
                        if (supportsBatchUpdates) {
                            ps.addBatch();
                            if (batch % UPDATE_BATCH_SIZE == 0) {
                                int[] counts = ps.executeBatch();
                                countExecute();
                                logger.logCounts(counts);
                            }
                        } else {
                            int count = ps.executeUpdate();
                            countExecute();
                            logger.logCount(count);
                        }
                    }
                    if (supportsBatchUpdates) {
                        int[] counts = ps.executeBatch();
                        countExecute();
                        logger.logCounts(counts);
                    }
                } finally {
                    closeStatement(ps);
                }
            } catch (SQLException e) {
                throw new NuxeoException("Could not update: " + update.sql, e);
            }
        }
    }
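
    /*
     * Sketch of the "canonical keys" grouping used in updateSimpleRows() above (hypothetical column names): rows
     * updated with the same set of columns share a single prepared UPDATE statement, and a column updated through a
     * Delta is tagged with '+' so that it is generated as an increment rather than a plain assignment:
     *
     *     keys = ["title", "size+"]   // "size" carries a Delta value
     *     ck   = "size+,title"        // sorted and comma-joined canonical form
     *     // all RowUpdates sharing this ck are batched through one PreparedStatement
     */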

    protected void updateCollectionRows(String tableName, List<RowUpdate> rowus) {
        Set<Serializable> deleteIds = new HashSet<>();
        for (RowUpdate rowu : rowus) {
            if (rowu.pos == -1 || collectionDeleteBeforeAppend) {
                deleteIds.add(rowu.row.id);
            }
        }
        deleteRows(tableName, deleteIds);
        insertCollectionRows(tableName, rowus);
    }

    /**
     * Deletes multiple rows, all for the same table.
     */
    protected void deleteRows(String tableName, Set<Serializable> ids) {
        if (ids.isEmpty()) {
            return;
        }
        int size = ids.size();
        int chunkSize = sqlInfo.getMaximumArgsForIn();
        if (size > chunkSize) {
            List<Serializable> idList = new ArrayList<Serializable>(ids);
            for (int start = 0; start < size; start += chunkSize) {
                int end = start + chunkSize;
                if (end > size) {
                    end = size;
                }
                // needs to be Serializable -> copy
                List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
                deleteRowsDirect(tableName, chunkIds);
            }
        } else {
            deleteRowsDirect(tableName, ids);
        }
    }

    protected void deleteRowsSoft(List<NodeInfo> nodeInfos) {
        try {
            int size = nodeInfos.size();
            List<Serializable> ids = new ArrayList<Serializable>(size);
            for (NodeInfo info : nodeInfos) {
                ids.add(info.id);
            }
            int chunkSize = 100; // max size of ids array
            if (size <= chunkSize) {
                doSoftDeleteRows(ids);
            } else {
                for (int start = 0; start < size;) {
                    int end = start + chunkSize;
                    if (end > size) {
                        end = size;
                    }
                    doSoftDeleteRows(ids.subList(start, end));
                    start = end;
                }
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not soft delete", e);
        }
    }

    // not chunked
    protected void doSoftDeleteRows(List<Serializable> ids) throws SQLException {
        Serializable whereIds = newIdArray(ids);
        Calendar now = Calendar.getInstance();
        String sql = sqlInfo.getSoftDeleteSql();
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Arrays.asList(whereIds, now));
        }
        PreparedStatement ps = connection.prepareStatement(sql);
        try {
            setToPreparedStatementIdArray(ps, 1, whereIds);
            dialect.setToPreparedStatementTimestamp(ps, 2, now, null);
            ps.execute();
            countExecute();
            return;
        } finally {
            try {
                closeStatement(ps);
            } catch (SQLException e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

    protected Serializable newIdArray(Collection<Serializable> ids) {
        if (dialect.supportsArrays()) {
            return ids.toArray(); // Object[]
        } else {
            // join with '|'
            StringBuilder b = new StringBuilder();
            for (Serializable id : ids) {
                b.append(id);
                b.append('|');
            }
            b.setLength(b.length() - 1);
            return b.toString();
        }
    }
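
    /*
     * Example of the two encodings produced by newIdArray() above (illustrative ids):
     *
     *     newIdArray(Arrays.asList("123", "456"))
     *     // dialect with native array support    -> new Object[] { "123", "456" }
     *     // dialect without native array support -> "123|456"
     */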

    protected void setToPreparedStatementIdArray(PreparedStatement ps, int index, Serializable idArray)
            throws SQLException {
        if (idArray instanceof String) {
            ps.setString(index, (String) idArray);
        } else {
            Array array = dialect.createArrayOf(Types.OTHER, (Object[]) idArray, connection);
            ps.setArray(index, array);
        }
    }

    /**
     * Clean up soft-deleted rows.
     * <p>
     * Rows deleted more recently than the beforeTime are left alone. Only a limited number of rows may be deleted, to
     * prevent the transaction from lasting too long.
     *
     * @param max the maximum number of rows to delete at a time
     * @param beforeTime the maximum deletion time of the rows to delete
     * @return the number of rows deleted
     */
    public int cleanupDeletedRows(int max, Calendar beforeTime) {
        if (max < 0) {
            max = 0;
        }
        String sql = sqlInfo.getSoftDeleteCleanupSql();
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Arrays.<Serializable> asList(beforeTime, Long.valueOf(max)));
        }
        try {
            if (sql.startsWith("{")) {
                // callable statement
                boolean outFirst = sql.startsWith("{?=");
                int outIndex = outFirst ? 1 : 3;
                int inIndex = outFirst ? 2 : 1;
                CallableStatement cs = connection.prepareCall(sql);
                try {
                    cs.setInt(inIndex, max);
                    dialect.setToPreparedStatementTimestamp(cs, inIndex + 1, beforeTime, null);
                    cs.registerOutParameter(outIndex, Types.INTEGER);
                    cs.execute();
                    int count = cs.getInt(outIndex);
                    logger.logCount(count);
                    return count;
                } finally {
                    cs.close();
                }
            } else {
                // standard prepared statement with result set
                PreparedStatement ps = connection.prepareStatement(sql);
                try {
                    ps.setInt(1, max);
                    dialect.setToPreparedStatementTimestamp(ps, 2, beforeTime, null);
                    ResultSet rs = ps.executeQuery();
                    countExecute();
                    if (!rs.next()) {
                        throw new NuxeoException("Cannot get result");
                    }
                    int count = rs.getInt(1);
                    logger.logCount(count);
                    return count;
                } finally {
                    closeStatement(ps);
                }
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not purge soft delete", e);
        }
    }
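
    /*
     * The two shapes of cleanup SQL handled above, shown on illustrative statements (the real text comes from
     * sqlInfo.getSoftDeleteCleanupSql() and is dialect-specific):
     *
     *     "{?= call NX_CLEANUP(?, ?)}"       // callable statement: the count comes back as the OUT parameter
     *     "SELECT my_cleanup_function(?, ?)" // plain prepared statement: the count is read from the result set
     */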

    protected void deleteRowsDirect(String tableName, Collection<Serializable> ids) {
        try {
            String sql = sqlInfo.getDeleteSql(tableName, ids.size());
            if (logger.isLogEnabled()) {
                logger.logSQL(sql, ids);
            }
            PreparedStatement ps = connection.prepareStatement(sql);
            try {
                int i = 1;
                for (Serializable id : ids) {
                    dialect.setId(ps, i++, id);
                }
                int count = ps.executeUpdate();
                countExecute();
                logger.logCount(count);
            } finally {
                closeStatement(ps);
            }
        } catch (SQLException e) {
            checkConcurrentUpdate(e);
            throw new NuxeoException("Could not delete: " + tableName, e);
        }
    }

    @Override
    public Row readSimpleRow(RowId rowId) {
        SQLInfoSelect select = sqlInfo.selectFragmentById.get(rowId.tableName);
        Map<String, Serializable> criteriaMap = Collections.singletonMap(model.MAIN_KEY, rowId.id);
        List<Row> maps = getSelectRows(rowId.tableName, select, criteriaMap, null, true);
        return maps.isEmpty() ? null : maps.get(0);
    }

    @Override
    public Map<String, String> getBinaryFulltext(RowId rowId) {
        ArrayList<String> columns = new ArrayList<String>();
        for (String index : model.getFulltextConfiguration().indexesAllBinary) {
            String col = Model.FULLTEXT_BINARYTEXT_KEY + model.getFulltextIndexSuffix(index);
            columns.add(col);
        }
        Serializable id = rowId.id;
        Map<String, String> ret = new HashMap<String, String>(columns.size());
        String sql = dialect.getBinaryFulltextSql(columns);
        if (sql == null) {
            logger.info("getBinaryFulltextSql not supported for dialect " + dialect);
            return ret;
        }
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Collections.singletonList(id));
        }
        PreparedStatement ps = null;
        ResultSet rs = null;
        try {
            ps = connection.prepareStatement(sql);
            try {
                dialect.setId(ps, 1, id);
                rs = ps.executeQuery();
                while (rs.next()) {
                    for (int i = 1; i <= columns.size(); i++) {
                        ret.put(columns.get(i - 1), rs.getString(i));
                    }
                }
                if (logger.isLogEnabled()) {
                    logger.log("  -> " + ret);
                }
            } finally {
                closeStatement(ps, rs);
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not select: " + sql, e);
        }
        return ret;
    }

    @Override
    public Serializable[] readCollectionRowArray(RowId rowId) {
        String tableName = rowId.tableName;
        Serializable id = rowId.id;
        String sql = sqlInfo.selectFragmentById.get(tableName).sql;
        try {
            // XXX statement should be already prepared
            if (logger.isLogEnabled()) {
                logger.logSQL(sql, Collections.singletonList(id));
            }
            PreparedStatement ps = connection.prepareStatement(sql);
            ResultSet rs = null;
            try {
                List<Column> columns = sqlInfo.selectFragmentById.get(tableName).whatColumns;
                dialect.setId(ps, 1, id); // assumes only one primary column
                rs = ps.executeQuery();
                countExecute();

                // construct the resulting collection using each row
                CollectionIO io = getCollectionIO(tableName);
                List<Serializable> list = new ArrayList<Serializable>();
                Serializable[] returnId = new Serializable[1];
                int[] returnPos = { -1 };
                while (rs.next()) {
                    list.add(io.getCurrentFromResultSet(rs, columns, model, returnId, returnPos));
                }
                PropertyType type = model.getCollectionFragmentType(tableName).getArrayBaseType();
                Serializable[] array = type.collectionToArray(list);

                if (logger.isLogEnabled()) {
                    logger.log("  -> " + Arrays.asList(array));
                }
                return array;
            } finally {
                closeStatement(ps, rs);
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not select: " + sql, e);
        }
    }

    @Override
    public List<Row> readSelectionRows(SelectionType selType, Serializable selId, Serializable filter,
            Serializable criterion, boolean limitToOne) {
        SQLInfoSelection selInfo = sqlInfo.getSelection(selType);
        Map<String, Serializable> criteriaMap = new HashMap<String, Serializable>();
        criteriaMap.put(selType.selKey, selId);
        SQLInfoSelect select;
        if (filter == null) {
            select = selInfo.selectAll;
        } else {
            select = selInfo.selectFiltered;
            criteriaMap.put(selType.filterKey, filter);
        }
        if (selType.criterionKey != null) {
            criteriaMap.put(selType.criterionKey, criterion);
        }
        return getSelectRows(selType.tableName, select, criteriaMap, null, limitToOne);
    }

    @Override
    public CopyResult copy(IdWithTypes source, Serializable destParentId, String destName, Row overwriteRow) {
        // assert !model.separateMainTable; // other case not implemented
        Invalidations invalidations = new Invalidations();
        try {
            Map<Serializable, Serializable> idMap = new LinkedHashMap<Serializable, Serializable>();
            Map<Serializable, IdWithTypes> idToTypes = new HashMap<Serializable, IdWithTypes>();
            // copy the hierarchy fragments recursively
            Serializable overwriteId = overwriteRow == null ? null : overwriteRow.id;
            if (overwriteId != null) {
                // overwrite hier root with explicit values
                String tableName = model.HIER_TABLE_NAME;
                updateSimpleRowWithValues(tableName, overwriteRow);
                idMap.put(source.id, overwriteId);
                // invalidate
                invalidations.addModified(new RowId(tableName, overwriteId));
            }
            // create the new hierarchy by copy
            boolean resetVersion = destParentId != null;
            Serializable newRootId = copyHierRecursive(source, destParentId, destName, overwriteId, resetVersion,
                    idMap, idToTypes);
            // invalidate children
            Serializable invalParentId = overwriteId == null ? destParentId : overwriteId;
            if (invalParentId != null) { // null for a new version
                invalidations.addModified(new RowId(Invalidations.PARENT, invalParentId));
            }
            // copy all collected fragments
            Set<Serializable> proxyIds = new HashSet<Serializable>();
            for (Entry<String, Set<Serializable>> entry : model.getPerFragmentIds(idToTypes).entrySet()) {
                String tableName = entry.getKey();
                if (tableName.equals(model.HIER_TABLE_NAME)) {
                    // already done
                    continue;
                }
                if (tableName.equals(model.VERSION_TABLE_NAME)) {
                    // versions not fileable
                    // restore must not copy versions either
                    continue;
                }
                Set<Serializable> ids = entry.getValue();
                if (tableName.equals(model.PROXY_TABLE_NAME)) {
                    for (Serializable id : ids) {
                        proxyIds.add(idMap.get(id)); // copied ids
                    }
                }
                Boolean invalidation = copyRows(tableName, ids, idMap, overwriteId);
                if (invalidation != null) {
                    // overwrote something
                    // make sure things are properly invalidated in this and
                    // other sessions
                    if (Boolean.TRUE.equals(invalidation)) {
                        invalidations.addModified(new RowId(tableName, overwriteId));
                    } else {
                        invalidations.addDeleted(new RowId(tableName, overwriteId));
                    }
                }
            }
            return new CopyResult(newRootId, invalidations, proxyIds);
        } catch (SQLException e) {
            throw new NuxeoException("Could not copy: " + source.id.toString(), e);
        }
    }

    /**
     * Updates a row in the database with given explicit values.
     */
    protected void updateSimpleRowWithValues(String tableName, Row row) {
        Update update = sqlInfo.getUpdateByIdForKeys(tableName, row.getKeys());
        Table table = update.getTable();
        String sql = update.getStatement();
        try {
            PreparedStatement ps = connection.prepareStatement(sql);
            try {
                if (logger.isLogEnabled()) {
                    List<Serializable> values = new LinkedList<Serializable>();
                    values.addAll(row.getValues());
                    values.add(row.id); // id last in SQL
                    logger.logSQL(sql, values);
                }
                int i = 1;
                List<String> keys = row.getKeys();
                List<Serializable> values = row.getValues();
                int size = keys.size();
                for (int r = 0; r < size; r++) {
                    String key = keys.get(r);
                    Serializable value = values.get(r);
                    table.getColumn(key).setToPreparedStatement(ps, i++, value);
                }
                dialect.setId(ps, i, row.id); // id last in SQL
                int count = ps.executeUpdate();
                countExecute();
                logger.logCount(count);
            } finally {
                closeStatement(ps);
            }
        } catch (SQLException e) {
            throw new NuxeoException("Could not update: " + sql, e);
        }
    }

    /**
     * Copies hierarchy from id to parentId, and recurses.
     * <p>
     * If name is {@code null}, then the original name is kept.
     * <p>
     * {@code idMap} is filled with info about the correspondence between original and copied ids. {@code idToTypes} is
     * filled with the type of each (source) fragment.
     * <p>
     * TODO: this should be optimized to use a stored procedure.
     *
     * @param overwriteId when not {@code null}, the copy is done onto this existing node (skipped)
     * @return the new root id
     */
    protected Serializable copyHierRecursive(IdWithTypes source, Serializable parentId, String name,
            Serializable overwriteId, boolean resetVersion, Map<Serializable, Serializable> idMap,
            Map<Serializable, IdWithTypes> idToTypes) throws SQLException {
        idToTypes.put(source.id, source);
        Serializable newId;
        if (overwriteId == null) {
            newId = copyHier(source.id, parentId, name, resetVersion, idMap);
        } else {
            newId = overwriteId;
            idMap.put(source.id, newId);
        }
        // recurse in children
        boolean onlyComplex = parentId == null;
        for (IdWithTypes child : getChildrenIdsWithTypes(source.id, onlyComplex)) {
            copyHierRecursive(child, newId, null, null, resetVersion, idMap, idToTypes);
        }
        return newId;
    }

    /**
     * Copies hierarchy from id to a new child of parentId.
     * <p>
     * If name is {@code null}, then the original name is kept.
     * <p>
     * {@code idMap} is filled with info about the correspondence between original and copied ids.
     *
     * @return the new id
     */
    protected Serializable copyHier(Serializable id, Serializable parentId, String name, boolean resetVersion,
            Map<Serializable, Serializable> idMap) throws SQLException {
        boolean explicitName = name != null;

        SQLInfoSelect copy = sqlInfo.getCopyHier(explicitName, resetVersion);
        PreparedStatement ps = connection.prepareStatement(copy.sql);
        try {
            Serializable newId = generateNewId();

            List<Serializable> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new ArrayList<Serializable>(4);
            }
            int i = 1;
            for (Column column : copy.whatColumns) {
                String key = column.getKey();
                Serializable v;
                if (key.equals(model.HIER_PARENT_KEY)) {
                    v = parentId;
                } else if (key.equals(model.HIER_CHILD_NAME_KEY)) {
                    // present if name explicitly set (first iteration)
                    v = name;
                } else if (key.equals(model.MAIN_KEY)) {
                    // present if APP_UUID generation
                    v = newId;
                } else if (key.equals(model.MAIN_BASE_VERSION_KEY) || key.equals(model.MAIN_CHECKED_IN_KEY)) {
                    v = null;
                } else if (key.equals(model.MAIN_MINOR_VERSION_KEY) || key.equals(model.MAIN_MAJOR_VERSION_KEY)) {
                    // present if reset version (regular copy, not checkin)
                    v = null;
                } else {
                    throw new RuntimeException(column.toString());
                }
                column.setToPreparedStatement(ps, i++, v);
                if (debugValues != null) {
                    debugValues.add(v);
                }
            }
            // last parameter is for 'WHERE "id" = ?'
            Column whereColumn = copy.whereColumns.get(0);
            whereColumn.setToPreparedStatement(ps, i, id);
            if (debugValues != null) {
                debugValues.add(id);
                logger.logSQL(copy.sql, debugValues);
            }
            int count = ps.executeUpdate();
            countExecute();
            logger.logCount(count);

            // TODO DB_IDENTITY
            // post insert fetch idrow

            idMap.put(id, newId);
            return newId;
        } finally {
            try {
                closeStatement(ps);
            } catch (SQLException e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

    /**
     * Gets the children ids and types of a node.
     */
    protected List<IdWithTypes> getChildrenIdsWithTypes(Serializable id, boolean onlyComplex) throws SQLException {
        List<IdWithTypes> children = new LinkedList<IdWithTypes>();
        String sql = sqlInfo.getSelectChildrenIdsAndTypesSql(onlyComplex);
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Collections.singletonList(id));
        }
        List<Column> columns = sqlInfo.getSelectChildrenIdsAndTypesWhatColumns();
        PreparedStatement ps = connection.prepareStatement(sql);
        ResultSet rs = null;
        try {
            List<String> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new LinkedList<String>();
            }
            dialect.setId(ps, 1, id); // parent id
            rs = ps.executeQuery();
            countExecute();
            while (rs.next()) {
                Serializable childId = null;
                String childPrimaryType = null;
                String[] childMixinTypes = null;
                int i = 1;
                for (Column column : columns) {
                    String key = column.getKey();
                    Serializable value = column.getFromResultSet(rs, i++);
                    if (key.equals(model.MAIN_KEY)) {
                        childId = value;
                    } else if (key.equals(model.MAIN_PRIMARY_TYPE_KEY)) {
                        childPrimaryType = (String) value;
                    } else if (key.equals(model.MAIN_MIXIN_TYPES_KEY)) {
                        childMixinTypes = (String[]) value;
                    }
                }
                children.add(new IdWithTypes(childId, childPrimaryType, childMixinTypes));
                if (debugValues != null) {
                    debugValues.add(childId + "/" + childPrimaryType + "/" + Arrays.toString(childMixinTypes));
                }
            }
            if (debugValues != null) {
                logger.log("  -> " + debugValues);
            }
            return children;
        } finally {
            try {
                closeStatement(ps);
            } catch (SQLException e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

    /**
     * Copies the rows from tableName with the given ids into new ones, with new ids given by idMap.
     * <p>
     * The row with id {@code overwriteId}, if it already exists, is first deleted.
     *
     * @return {@link Boolean#TRUE} for a modification or creation, {@link Boolean#FALSE} for a deletion, {@code null}
     *         otherwise (still absent)
     * @throws SQLException
     */
    protected Boolean copyRows(String tableName, Set<Serializable> ids, Map<Serializable, Serializable> idMap,
            Serializable overwriteId) throws SQLException {
        String copySql = sqlInfo.getCopySql(tableName);
        Column copyIdColumn = sqlInfo.getCopyIdColumn(tableName);
        PreparedStatement copyPs = connection.prepareStatement(copySql);
        String deleteSql = sqlInfo.getDeleteSql(tableName);
        PreparedStatement deletePs = connection.prepareStatement(deleteSql);
        try {
            boolean before = false;
            boolean after = false;
            for (Serializable id : ids) {
                Serializable newId = idMap.get(id);
                boolean overwrite = newId.equals(overwriteId);
                if (overwrite) {
                    // remove existing first
                    if (logger.isLogEnabled()) {
                        logger.logSQL(deleteSql, Collections.singletonList(newId));
                    }
                    dialect.setId(deletePs, 1, newId);
                    int delCount = deletePs.executeUpdate();
                    countExecute();
                    logger.logCount(delCount);
                    before = delCount > 0;
                }
                copyIdColumn.setToPreparedStatement(copyPs, 1, newId);
                copyIdColumn.setToPreparedStatement(copyPs, 2, id);
                if (logger.isLogEnabled()) {
                    logger.logSQL(copySql, Arrays.asList(newId, id));
                }
                int copyCount = copyPs.executeUpdate();
                countExecute();
                logger.logCount(copyCount);
                if (overwrite) {
                    after = copyCount > 0;
                }
            }
            // * , n -> mod (TRUE)
            // n , 0 -> del (FALSE)
            // 0 , 0 -> null
            return after ? Boolean.TRUE : (before ? Boolean.FALSE : null);
        } finally {
            try {
                closeStatement(copyPs);
                closeStatement(deletePs);
            } catch (SQLException e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

    @Override
    public List<NodeInfo> remove(NodeInfo rootInfo) {
        Serializable rootId = rootInfo.id;
        List<NodeInfo> info = getDescendantsInfo(rootId);
        info.add(rootInfo);
        if (sqlInfo.softDeleteEnabled) {
            deleteRowsSoft(info);
        } else {
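            // hard delete: only the root row is removed here; descendant rows are expected to be
            // removed by the database through the ON DELETE CASCADE constraints on the hierarchy table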
            deleteRowsDirect(model.HIER_TABLE_NAME, Collections.singleton(rootId));
        }
        return info;
    }

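    /**
     * Gets the {@link NodeInfo} of all the descendants of the given node (the node itself excluded), either through a
     * single query when the dialect supports it, or iteratively otherwise.
     */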
    protected List<NodeInfo> getDescendantsInfo(Serializable rootId) {
        if (!dialect.supportsFastDescendants()) {
            return getDescendantsInfoIterative(rootId);
        }
        List<NodeInfo> descendants = new LinkedList<NodeInfo>();
        String sql = sqlInfo.getSelectDescendantsInfoSql();
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Collections.singletonList(rootId));
        }
        List<Column> columns = sqlInfo.getSelectDescendantsInfoWhatColumns();
        PreparedStatement ps = null;
        try {
            ps = connection.prepareStatement(sql);
            List<String> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new LinkedList<String>();
            }
            dialect.setId(ps, 1, rootId); // parent id
            ResultSet rs = ps.executeQuery();
            countExecute();
            while (rs.next()) {
                Serializable id = null;
                Serializable parentId = null;
                String primaryType = null;
                Boolean isProperty = null;
                Serializable targetId = null;
                Serializable versionableId = null;
                int i = 1;
                for (Column column : columns) {
                    String key = column.getKey();
                    Serializable value = column.getFromResultSet(rs, i++);
                    if (key.equals(model.MAIN_KEY)) {
                        id = value;
                    } else if (key.equals(model.HIER_PARENT_KEY)) {
                        parentId = value;
                    } else if (key.equals(model.MAIN_PRIMARY_TYPE_KEY)) {
                        primaryType = (String) value;
                    } else if (key.equals(model.HIER_CHILD_ISPROPERTY_KEY)) {
                        isProperty = (Boolean) value;
                    } else if (key.equals(model.PROXY_TARGET_KEY)) {
                        targetId = value;
                    } else if (key.equals(model.PROXY_VERSIONABLE_KEY)) {
                        versionableId = value;
                    }
                    // no mixins (not useful to caller)
                    // no versions (not fileable)
                }
                descendants.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
                if (debugValues != null) {
                    if (debugValues.size() < DEBUG_MAX_TREE) {
                        debugValues.add(id + "/" + primaryType);
                    }
                }
            }
            if (debugValues != null) {
                if (debugValues.size() >= DEBUG_MAX_TREE) {
                    debugValues.add("... (" + descendants.size() + ") results");
                }
                logger.log("  -> " + debugValues);
            }
            return descendants;
        } catch (SQLException e) {
            throw new NuxeoException("Failed to get descendants", e);
        } finally {
            try {
                closeStatement(ps);
            } catch (SQLException e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

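    /**
     * Computes the descendants info level by level (breadth-first), for dialects that cannot fetch all descendants in
     * a single query.
     */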
    protected List<NodeInfo> getDescendantsInfoIterative(Serializable rootId) {
        Set<Serializable> done = new HashSet<>();
        List<Serializable> todo = new ArrayList<>(Collections.singleton(rootId));
        List<NodeInfo> descendants = new ArrayList<NodeInfo>();
        while (!todo.isEmpty()) {
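            // fetch in a single query the children of all the nodes of the current level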
            List<NodeInfo> infos = getChildrenNodeInfos(todo);
            todo = new ArrayList<>();
            for (NodeInfo info : infos) {
                Serializable id = info.id;
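                // skip ids already processed, so that each node is visited at most once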
                if (!done.add(id)) {
                    continue;
                }
                todo.add(id);
                descendants.add(info);
            }
        }
        return descendants;
    }

    /**
     * Gets the direct children of the given nodes as a list of {@link NodeInfo}.
     */
    protected List<NodeInfo> getChildrenNodeInfos(Collection<Serializable> ids) {
        List<NodeInfo> children = new LinkedList<NodeInfo>();
        SQLInfoSelect select = sqlInfo.getSelectChildrenNodeInfos(ids.size());
        if (logger.isLogEnabled()) {
            logger.logSQL(select.sql, ids);
        }
        Column where = select.whereColumns.get(0);
        PreparedStatement ps = null;
        ResultSet rs = null;
        try {
            ps = connection.prepareStatement(select.sql);
            List<String> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new LinkedList<String>();
            }
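            // the select is generated for exactly ids.size() ids; bind each parent id to its placeholder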
            int ii = 1;
            for (Serializable id : ids) {
                where.setToPreparedStatement(ps, ii++, id);
            }
            rs = ps.executeQuery();
            countExecute();
            while (rs.next()) {
                Serializable id = null;
                Serializable parentId = null;
                String primaryType = null;
                Boolean isProperty = Boolean.FALSE;
                Serializable targetId = null;
                Serializable versionableId = null;
                int i = 1;
                for (Column column : select.whatColumns) {
                    String key = column.getKey();
                    Serializable value = column.getFromResultSet(rs, i++);
                    if (key.equals(model.MAIN_KEY)) {
                        id = value;
                    } else if (key.equals(model.HIER_PARENT_KEY)) {
                        parentId = value;
                    } else if (key.equals(model.MAIN_PRIMARY_TYPE_KEY)) {
                        primaryType = (String) value;
                    } else if (key.equals(model.PROXY_TARGET_KEY)) {
                        targetId = value;
                    } else if (key.equals(model.PROXY_VERSIONABLE_KEY)) {
                        versionableId = value;
                    }
                }
                children.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
                if (debugValues != null) {
                    if (debugValues.size() < DEBUG_MAX_TREE) {
                        debugValues.add(id + "/" + primaryType);
                    }
                }
            }
            if (debugValues != null) {
                if (debugValues.size() >= DEBUG_MAX_TREE) {
                    debugValues.add("... (" + children.size() + ") results");
                }
                logger.log("  -> " + debugValues);
            }
            return children;
        } catch (SQLException e) {
            throw new NuxeoException("Failed to get children node infos", e);
        } finally {
            try {
                closeStatement(ps, rs);
            } catch (SQLException e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

}