001/*
002 * (C) Copyright 2006-2016 Nuxeo SA (http://nuxeo.com/) and others.
003 *
004 * Licensed under the Apache License, Version 2.0 (the "License");
005 * you may not use this file except in compliance with the License.
006 * You may obtain a copy of the License at
007 *
008 *     http://www.apache.org/licenses/LICENSE-2.0
009 *
010 * Unless required by applicable law or agreed to in writing, software
011 * distributed under the License is distributed on an "AS IS" BASIS,
012 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
013 * See the License for the specific language governing permissions and
014 * limitations under the License.
015 *
016 * Contributors:
017 *     Florent Guillaume
018 */
019package org.nuxeo.ecm.core.storage.sql.jdbc;
020
021import java.io.Serializable;
022import java.sql.Array;
023import java.sql.BatchUpdateException;
024import java.sql.CallableStatement;
025import java.sql.PreparedStatement;
026import java.sql.ResultSet;
027import java.sql.SQLException;
028import java.sql.Types;
029import java.util.ArrayList;
030import java.util.Arrays;
031import java.util.Calendar;
032import java.util.Collection;
033import java.util.Collections;
034import java.util.HashMap;
035import java.util.HashSet;
036import java.util.LinkedHashMap;
037import java.util.LinkedList;
038import java.util.List;
039import java.util.ListIterator;
040import java.util.Map;
041import java.util.Map.Entry;
042import java.util.Set;
043import java.util.stream.Collectors;
044
045import javax.transaction.xa.XAException;
046import javax.transaction.xa.Xid;
047
048import org.apache.commons.lang.StringUtils;
049import org.nuxeo.ecm.core.api.NuxeoException;
050import org.nuxeo.ecm.core.api.ScrollResult;
051import org.nuxeo.ecm.core.api.model.Delta;
052import org.nuxeo.ecm.core.storage.sql.ClusterInvalidator;
053import org.nuxeo.ecm.core.storage.sql.Invalidations;
054import org.nuxeo.ecm.core.storage.sql.InvalidationsPropagator;
055import org.nuxeo.ecm.core.storage.sql.Model;
056import org.nuxeo.ecm.core.storage.sql.PropertyType;
057import org.nuxeo.ecm.core.storage.sql.Row;
058import org.nuxeo.ecm.core.storage.sql.RowId;
059import org.nuxeo.ecm.core.storage.sql.RowMapper;
060import org.nuxeo.ecm.core.storage.sql.SelectionType;
061import org.nuxeo.ecm.core.storage.sql.SimpleFragment;
062import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelect;
063import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelection;
064import org.nuxeo.ecm.core.storage.sql.jdbc.db.Column;
065import org.nuxeo.ecm.core.storage.sql.jdbc.db.Table;
066import org.nuxeo.ecm.core.storage.sql.jdbc.db.Update;
067import org.nuxeo.runtime.api.Framework;
068import org.nuxeo.runtime.services.config.ConfigurationService;
069
070/**
071 * A {@link JDBCRowMapper} maps {@link Row}s to and from a JDBC database.
072 */
073public class JDBCRowMapper extends JDBCConnection implements RowMapper {
074
075    public static final int UPDATE_BATCH_SIZE = 100; // also insert/delete
076
077    public static final int DEBUG_MAX_TREE = 50;
078
    /** Property determining whether appending to a collection first deletes all existing rows and re-inserts them, or is optimized for pure appends. */
080    public static final String COLLECTION_DELETE_BEFORE_APPEND_PROP = "org.nuxeo.vcs.list-delete-before-append";
081
    /**
     * Cluster invalidator, or {@code null} if this mapper does not participate in invalidation propagation (which is
     * the case for the mappers used internally by the cluster invalidator and by the lock manager).
     */
086    private final ClusterInvalidator clusterInvalidator;
087
088    private final InvalidationsPropagator invalidationsPropagator;
089
090    private final boolean collectionDeleteBeforeAppend;
091
092    private final CollectionIO aclCollectionIO;
093
094    private final CollectionIO scalarCollectionIO;
095
096    public JDBCRowMapper(Model model, SQLInfo sqlInfo, ClusterInvalidator clusterInvalidator,
097            InvalidationsPropagator invalidationsPropagator, boolean noSharing) {
098        super(model, sqlInfo, noSharing);
099        this.clusterInvalidator = clusterInvalidator;
100        this.invalidationsPropagator = invalidationsPropagator;
101        ConfigurationService configurationService = Framework.getService(ConfigurationService.class);
102        collectionDeleteBeforeAppend = configurationService.isBooleanPropertyTrue(COLLECTION_DELETE_BEFORE_APPEND_PROP);
103        aclCollectionIO = new ACLCollectionIO(collectionDeleteBeforeAppend);
104        scalarCollectionIO = new ScalarCollectionIO(collectionDeleteBeforeAppend);
105    }
106
107    @Override
108    public Invalidations receiveInvalidations() {
109        if (clusterInvalidator != null) {
110            Invalidations invalidations = clusterInvalidator.receiveInvalidations();
111            // send received invalidations to all mappers
112            if (invalidations != null && !invalidations.isEmpty()) {
113                invalidationsPropagator.propagateInvalidations(invalidations, null);
114            }
115            return invalidations;
116        } else {
117            return null;
118        }
119    }
120
121
122    @Override
123    public void sendInvalidations(Invalidations invalidations) {
124        if (clusterInvalidator != null) {
125            clusterInvalidator.sendInvalidations(invalidations);
126        }
127    }
128
129    @Override
130    public void clearCache() {
131        // no cache
132    }
133
134    @Override
135    public long getCacheSize() {
136        return 0;
137    }
138
139    @Override
140    public void rollback(Xid xid) throws XAException {
141        try {
142            xaresource.rollback(xid);
143        } catch (XAException e) {
144            logger.error("XA error on rollback: " + e);
145            throw e;
146        }
147    }
148
149    protected CollectionIO getCollectionIO(String tableName) {
150        return tableName.equals(model.ACL_TABLE_NAME) ? aclCollectionIO : scalarCollectionIO;
151    }
152
153    @Override
154    public Serializable generateNewId() {
155        try {
156            return dialect.getGeneratedId(connection);
157        } catch (SQLException e) {
158            throw new NuxeoException(e);
159        }
160    }
161
162    /*
163     * ----- RowIO -----
164     */
165
166    @Override
167    public List<? extends RowId> read(Collection<RowId> rowIds, boolean cacheOnly) {
168        List<RowId> res = new ArrayList<RowId>(rowIds.size());
169        if (cacheOnly) {
170            // return no data
171            for (RowId rowId : rowIds) {
172                res.add(new RowId(rowId));
173            }
174            return res;
175        }
176        // reorganize by table
177        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
178        for (RowId rowId : rowIds) {
179            Set<Serializable> ids = tableIds.get(rowId.tableName);
180            if (ids == null) {
181                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
182            }
183            ids.add(rowId.id);
184        }
185        // read on each table
186        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
187            String tableName = en.getKey();
188            Set<Serializable> ids = new HashSet<Serializable>(en.getValue());
189            int size = ids.size();
190            int chunkSize = sqlInfo.getMaximumArgsForIn();
191            List<Row> rows;
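            // Queries use an IN (?, ?, ...) clause, so the ids are split into chunks no larger than the
            // dialect's maximum number of IN arguments, to stay under database limits (for example, with a
            // hypothetical maximum of 1000, 2500 ids would be fetched in three statements of 1000, 1000 and 500).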
192            if (size > chunkSize) {
193                List<Serializable> idList = new ArrayList<Serializable>(ids);
194                rows = new ArrayList<Row>(size);
195                for (int start = 0; start < size; start += chunkSize) {
196                    int end = start + chunkSize;
197                    if (end > size) {
198                        end = size;
199                    }
200                    // needs to be Serializable -> copy
201                    List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
202                    List<Row> chunkRows;
203                    if (model.isCollectionFragment(tableName)) {
204                        chunkRows = readCollectionArrays(tableName, chunkIds);
205                    } else {
206                        chunkRows = readSimpleRows(tableName, chunkIds);
207                    }
208                    rows.addAll(chunkRows);
209                }
210            } else {
211                if (model.isCollectionFragment(tableName)) {
212                    rows = readCollectionArrays(tableName, ids);
213                } else {
214                    rows = readSimpleRows(tableName, ids);
215                }
216            }
217            // check we have all the ids (readSimpleRows may have some
218            // missing)
219            for (Row row : rows) {
220                res.add(row);
221                ids.remove(row.id);
222            }
223            // for the missing ids record an empty RowId
224            for (Serializable id : ids) {
225                res.add(new RowId(tableName, id));
226            }
227        }
228        return res;
229    }
230
231    /**
232     * Gets a list of rows for {@link SimpleFragment}s from the database, given the table name and the ids.
233     *
234     * @param tableName the table name
235     * @param ids the ids
236     * @return the list of rows, without the missing ones
237     */
238    protected List<Row> readSimpleRows(String tableName, Collection<Serializable> ids) {
239        if (ids.isEmpty()) {
240            return Collections.emptyList();
241        }
242        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size());
243        Map<String, Serializable> criteriaMap = Collections.singletonMap(model.MAIN_KEY, (Serializable) ids);
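        // the single criterion maps MAIN_KEY to the whole id collection; getSelectRows expands a Collection
        // value into one bound parameter per element, matching the IN clause built for ids.size() values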
244        return getSelectRows(tableName, select, criteriaMap, null, false);
245    }
246
247    /**
248     * Reads several collection rows, given a table name and the ids.
249     *
250     * @param tableName the table name
251     * @param ids the ids
252     */
253    protected List<Row> readCollectionArrays(String tableName, Collection<Serializable> ids) {
254        if (ids.isEmpty()) {
255            return Collections.emptyList();
256        }
        String[] orderBys = { model.MAIN_KEY, model.COLL_TABLE_POS_KEY }; // clusters results
259        Set<String> skipColumns = new HashSet<String>(Arrays.asList(model.COLL_TABLE_POS_KEY));
260        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size(), orderBys, skipColumns);
261
262        String sql = select.sql;
263        try {
264            if (logger.isLogEnabled()) {
265                logger.logSQL(sql, ids);
266            }
267            PreparedStatement ps = connection.prepareStatement(sql);
268            ResultSet rs = null;
269            try {
270                int i = 1;
271                for (Serializable id : ids) {
272                    dialect.setId(ps, i++, id);
273                }
274                rs = ps.executeQuery();
275                countExecute();
276
277                // get all values from result set, separate by ids
278                // the result set is ordered by id, pos
279                CollectionIO io = getCollectionIO(tableName);
280                PropertyType ftype = model.getCollectionFragmentType(tableName);
281                PropertyType type = ftype.getArrayBaseType();
282                Serializable curId = null;
283                List<Serializable> list = null;
284                Serializable[] returnId = new Serializable[1];
285                int[] returnPos = { -1 };
286                List<Row> res = new LinkedList<Row>();
287                Set<Serializable> remainingIds = new HashSet<Serializable>(ids);
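                // returnId/returnPos act as out parameters filled by the CollectionIO for each row; since the
                // result set is ordered by (id, pos), the values of one collection are contiguous and are
                // accumulated into 'list', which is flushed as a single array-valued Row when the id changes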
288                while (rs.next()) {
289                    Serializable value = io.getCurrentFromResultSet(rs, select.whatColumns, model, returnId, returnPos);
290                    Serializable newId = returnId[0];
291                    if (newId != null && !newId.equals(curId)) {
292                        // flush old list
293                        if (list != null) {
294                            res.add(new Row(tableName, curId, type.collectionToArray(list)));
295                            remainingIds.remove(curId);
296                        }
297                        curId = newId;
298                        list = new ArrayList<Serializable>();
299                    }
300                    list.add(value);
301                }
302                if (curId != null && list != null) {
303                    // flush last list
304                    res.add(new Row(tableName, curId, type.collectionToArray(list)));
305                    remainingIds.remove(curId);
306                }
307
308                // fill empty ones
309                if (!remainingIds.isEmpty()) {
310                    Serializable[] emptyArray = ftype.getEmptyArray();
311                    for (Serializable id : remainingIds) {
312                        res.add(new Row(tableName, id, emptyArray));
313                    }
314                }
315                if (logger.isLogEnabled()) {
316                    for (Row row : res) {
317                        logger.log("  -> " + row);
318                    }
319                }
320                return res;
321            } finally {
322                closeStatement(ps, rs);
323            }
324        } catch (SQLException e) {
325            throw new NuxeoException("Could not select: " + sql, e);
326        }
327    }
328
    /**
     * Fetches the rows for a select with fixed criteria given as two maps (a criteriaMap whose values end up in the
     * returned rows, and a joinMap for additional criteria that do not).
     */
333    protected List<Row> getSelectRows(String tableName, SQLInfoSelect select, Map<String, Serializable> criteriaMap,
334            Map<String, Serializable> joinMap, boolean limitToOne) {
335        List<Row> list = new LinkedList<Row>();
336        if (select.whatColumns.isEmpty()) {
337            // happens when we fetch a fragment whose columns are all opaque
338            // check it's a by-id query
            if (select.whereColumns.size() == 1 && select.whereColumns.get(0).getKey().equals(model.MAIN_KEY)
                    && joinMap == null) {
341                Row row = new Row(tableName, criteriaMap);
342                if (select.opaqueColumns != null) {
343                    for (Column column : select.opaqueColumns) {
344                        row.putNew(column.getKey(), Row.OPAQUE);
345                    }
346                }
347                list.add(row);
348                return list;
349            }
350            // else do a useless select but the criteria are more complex and we
351            // can't shortcut
352        }
353        if (joinMap == null) {
354            joinMap = Collections.emptyMap();
355        }
356        PreparedStatement ps = null;
357        ResultSet rs = null;
358        try {
359            ps = connection.prepareStatement(select.sql);
360
361            /*
362             * Compute where part.
363             */
364            List<Serializable> debugValues = null;
365            if (logger.isLogEnabled()) {
366                debugValues = new LinkedList<Serializable>();
367            }
368            int i = 1;
369            for (Column column : select.whereColumns) {
370                String key = column.getKey();
371                Serializable v;
372                if (criteriaMap.containsKey(key)) {
373                    v = criteriaMap.get(key);
374                } else if (joinMap.containsKey(key)) {
375                    v = joinMap.get(key);
376                } else {
377                    throw new RuntimeException(key);
378                }
379                if (v == null) {
380                    throw new NuxeoException("Null value for key: " + key);
381                }
382                if (v instanceof Collection<?>) {
383                    // allow insert of several values, for the IN (...) case
384                    for (Object vv : (Collection<?>) v) {
385                        column.setToPreparedStatement(ps, i++, (Serializable) vv);
386                        if (debugValues != null) {
387                            debugValues.add((Serializable) vv);
388                        }
389                    }
390                } else {
391                    column.setToPreparedStatement(ps, i++, v);
392                    if (debugValues != null) {
393                        debugValues.add(v);
394                    }
395                }
396            }
397            if (debugValues != null) {
398                logger.logSQL(select.sql, debugValues);
399            }
400
401            /*
402             * Execute query.
403             */
404            rs = ps.executeQuery();
405            countExecute();
406
407            /*
408             * Construct the maps from the result set.
409             */
410            while (rs.next()) {
411                Row row = new Row(tableName, criteriaMap);
412                i = 1;
413                for (Column column : select.whatColumns) {
414                    row.put(column.getKey(), column.getFromResultSet(rs, i++));
415                }
416                if (select.opaqueColumns != null) {
417                    for (Column column : select.opaqueColumns) {
418                        row.putNew(column.getKey(), Row.OPAQUE);
419                    }
420                }
421                if (logger.isLogEnabled()) {
422                    logger.logResultSet(rs, select.whatColumns);
423                }
424                list.add(row);
425                if (limitToOne) {
426                    return list;
427                }
428            }
429            if (limitToOne) {
430                return Collections.emptyList();
431            }
432            return list;
433        } catch (SQLException e) {
434            checkConcurrentUpdate(e);
435            throw new NuxeoException("Could not select: " + select.sql, e);
436        } finally {
437            try {
438                closeStatement(ps, rs);
439            } catch (SQLException e) {
440                logger.error(e.getMessage(), e);
441            }
442        }
443    }
444
445    @Override
446    public void write(RowBatch batch) {
447        if (!batch.creates.isEmpty()) {
448            writeCreates(batch.creates);
449        }
450        if (!batch.updates.isEmpty()) {
451            writeUpdates(batch.updates);
452        }
453        if (!batch.deletes.isEmpty()) {
454            writeDeletes(batch.deletes);
455        }
456        // batch.deletesDependent not executed
457    }
458
459    protected void writeCreates(List<Row> creates) {
460        // reorganize by table
461        Map<String, List<Row>> tableRows = new LinkedHashMap<String, List<Row>>();
462        // hierarchy table first because there are foreign keys to it
463        tableRows.put(model.HIER_TABLE_NAME, new LinkedList<Row>());
464        for (Row row : creates) {
465            List<Row> rows = tableRows.get(row.tableName);
466            if (rows == null) {
467                tableRows.put(row.tableName, rows = new LinkedList<Row>());
468            }
469            rows.add(row);
470        }
471        // inserts on each table
472        for (Entry<String, List<Row>> en : tableRows.entrySet()) {
473            String tableName = en.getKey();
474            List<Row> rows = en.getValue();
475            if (model.isCollectionFragment(tableName)) {
476                List<RowUpdate> rowus = rows.stream().map(RowUpdate::new).collect(Collectors.toList());
477                insertCollectionRows(tableName, rowus);
478            } else {
479                insertSimpleRows(tableName, rows);
480            }
481        }
482    }
483
484    protected void writeUpdates(Set<RowUpdate> updates) {
485        // reorganize by table
486        Map<String, List<RowUpdate>> tableRows = new HashMap<String, List<RowUpdate>>();
487        for (RowUpdate rowu : updates) {
488            List<RowUpdate> rows = tableRows.get(rowu.row.tableName);
489            if (rows == null) {
490                tableRows.put(rowu.row.tableName, rows = new LinkedList<RowUpdate>());
491            }
492            rows.add(rowu);
493        }
494        // updates on each table
495        for (Entry<String, List<RowUpdate>> en : tableRows.entrySet()) {
496            String tableName = en.getKey();
497            List<RowUpdate> rows = en.getValue();
498            if (model.isCollectionFragment(tableName)) {
499                updateCollectionRows(tableName, rows);
500            } else {
501                updateSimpleRows(tableName, rows);
502            }
503        }
504    }
505
506    protected void writeDeletes(Collection<RowId> deletes) {
507        // reorganize by table
508        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
509        for (RowId rowId : deletes) {
510            Set<Serializable> ids = tableIds.get(rowId.tableName);
511            if (ids == null) {
512                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
513            }
514            ids.add(rowId.id);
515        }
516        // delete on each table
517        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
518            String tableName = en.getKey();
519            Set<Serializable> ids = en.getValue();
520            deleteRows(tableName, ids);
521        }
522    }
523
524    /**
525     * Inserts multiple rows, all for the same table.
526     */
527    protected void insertSimpleRows(String tableName, List<Row> rows) {
528        if (rows.isEmpty()) {
529            return;
530        }
531        String sql = sqlInfo.getInsertSql(tableName);
532        if (sql == null) {
533            throw new NuxeoException("Unknown table: " + tableName);
534        }
535        String loggedSql = supportsBatchUpdates && rows.size() > 1 ? sql + " -- BATCHED" : sql;
536        List<Column> columns = sqlInfo.getInsertColumns(tableName);
537        try {
538            PreparedStatement ps = connection.prepareStatement(sql);
539            try {
540                int batch = 0;
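                // when the JDBC driver supports batch updates, rows are added to a batch that is flushed every
                // UPDATE_BATCH_SIZE additions; the executeBatch() after the loop flushes the remainder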
541                for (Row row : rows) {
542                    batch++;
543                    if (logger.isLogEnabled()) {
544                        logger.logSQL(loggedSql, columns, row);
545                    }
546                    int i = 1;
547                    for (Column column : columns) {
548                        column.setToPreparedStatement(ps, i++, row.get(column.getKey()));
549                    }
550                    if (supportsBatchUpdates) {
551                        ps.addBatch();
552                        if (batch % UPDATE_BATCH_SIZE == 0) {
553                            ps.executeBatch();
554                            countExecute();
555                        }
556                    } else {
557                        ps.execute();
558                        countExecute();
559                    }
560                }
561                if (supportsBatchUpdates) {
562                    ps.executeBatch();
563                    countExecute();
564                }
565            } finally {
566                closeStatement(ps);
567            }
568        } catch (SQLException e) {
569            if (e instanceof BatchUpdateException) {
570                BatchUpdateException bue = (BatchUpdateException) e;
571                if (e.getCause() == null && bue.getNextException() != null) {
572                    // provide a readable cause in the stack trace
573                    e.initCause(bue.getNextException());
574                }
575            }
576            checkConcurrentUpdate(e);
577            throw new NuxeoException("Could not insert: " + sql, e);
578        }
579    }
580
    /**
     * Inserts multiple collection rows, all for the same table.
     */
584    protected void insertCollectionRows(String tableName, List<RowUpdate> rowus) {
585        if (rowus.isEmpty()) {
586            return;
587        }
588        String sql = sqlInfo.getInsertSql(tableName);
589        List<Column> columns = sqlInfo.getInsertColumns(tableName);
590        CollectionIO io = getCollectionIO(tableName);
591        try {
592            PreparedStatement ps = connection.prepareStatement(sql);
593            try {
594                io.executeInserts(ps, rowus, columns, supportsBatchUpdates, sql, this);
595            } finally {
596                closeStatement(ps);
597            }
598        } catch (SQLException e) {
599            throw new NuxeoException("Could not insert: " + sql, e);
600        }
601    }
602
603    /**
604     * Updates multiple simple rows, all for the same table.
605     */
606    protected void updateSimpleRows(String tableName, List<RowUpdate> rows) {
607        if (rows.isEmpty()) {
608            return;
609        }
610
611        // reorganize by unique sets of keys + which ones are for delta updates
612        Map<String, List<RowUpdate>> updatesByCanonKeys = new HashMap<>();
613        Map<String, Collection<String>> keysByCanonKeys = new HashMap<>();
614        Map<String, Set<String>> deltasByCanonKeys = new HashMap<>();
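        // Updates are grouped by the exact set of columns they touch, so that a single UPDATE statement can be
        // prepared (and batched) per group. Columns holding a Delta with a base value are tagged with a '+' in
        // the canonical key, since they presumably need a different SQL form from getUpdateById (an increment
        // such as "col = col + ?" rather than a plain "col = ?"); only the delta value is bound below.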
615        for (RowUpdate rowu : rows) {
616            List<String> keys = new ArrayList<String>(rowu.keys);
617            if (keys.isEmpty()) {
618                continue;
619            }
620            Set<String> deltas = new HashSet<>();
621            for (ListIterator<String> it = keys.listIterator(); it.hasNext();) {
622                String key = it.next();
623                Serializable value = rowu.row.get(key);
624                if (value instanceof Delta && ((Delta) value).getBase() != null) {
625                    deltas.add(key);
626                    it.set(key + '+');
627                }
628            }
629            Collections.sort(keys);
630            String ck = StringUtils.join(keys, ','); // canonical keys
631            List<RowUpdate> keysUpdates = updatesByCanonKeys.get(ck);
632            if (keysUpdates == null) {
633                updatesByCanonKeys.put(ck, keysUpdates = new LinkedList<RowUpdate>());
634                keysByCanonKeys.put(ck, rowu.keys);
635                deltasByCanonKeys.put(ck, deltas);
636            }
637            keysUpdates.add(rowu);
638        }
639
640        for (String ck : updatesByCanonKeys.keySet()) {
641            List<RowUpdate> keysUpdates = updatesByCanonKeys.get(ck);
642            Collection<String> keys = keysByCanonKeys.get(ck);
643            Set<String> deltas = deltasByCanonKeys.get(ck);
644            SQLInfoSelect update = sqlInfo.getUpdateById(tableName, keys, deltas);
645            String loggedSql = supportsBatchUpdates && rows.size() > 1 ? update.sql + " -- BATCHED" : update.sql;
646            try {
647                PreparedStatement ps = connection.prepareStatement(update.sql);
648                int batch = 0;
649                try {
650                    for (RowUpdate rowu : keysUpdates) {
651                        batch++;
652                        if (logger.isLogEnabled()) {
653                            logger.logSQL(loggedSql, update.whatColumns, rowu.row, deltas);
654                        }
655                        int i = 1;
656                        for (Column column : update.whatColumns) {
657                            Serializable value = rowu.row.get(column.getKey());
658                            if (value instanceof Delta) {
659                                value = ((Delta) value).getDeltaValue();
660                            }
661                            column.setToPreparedStatement(ps, i++, value);
662                        }
663                        if (supportsBatchUpdates) {
664                            ps.addBatch();
665                            if (batch % UPDATE_BATCH_SIZE == 0) {
666                                int[] counts = ps.executeBatch();
667                                countExecute();
668                                logger.logCounts(counts);
669                            }
670                        } else {
671                            int count = ps.executeUpdate();
672                            countExecute();
673                            logger.logCount(count);
674                        }
675                    }
676                    if (supportsBatchUpdates) {
677                        int[] counts = ps.executeBatch();
678                        countExecute();
679                        logger.logCounts(counts);
680                    }
681                } finally {
682                    closeStatement(ps);
683                }
684            } catch (SQLException e) {
685                checkConcurrentUpdate(e);
686                throw new NuxeoException("Could not update: " + update.sql, e);
687            }
688        }
689    }
690
691    protected void updateCollectionRows(String tableName, List<RowUpdate> rowus) {
692        Set<Serializable> deleteIds = new HashSet<>();
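        // A pos of -1 presumably means the collection was rewritten rather than appended to, so its existing
        // rows are deleted before re-inserting everything; when the delete-before-append property is set, even
        // appends are handled as a full delete + re-insert.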
693        for (RowUpdate rowu : rowus) {
694            if (rowu.pos == -1 || collectionDeleteBeforeAppend) {
695                deleteIds.add(rowu.row.id);
696            }
697        }
698        deleteRows(tableName, deleteIds);
699        insertCollectionRows(tableName, rowus);
700    }
701
702    /**
703     * Deletes multiple rows, all for the same table.
704     */
705    protected void deleteRows(String tableName, Set<Serializable> ids) {
706        if (ids.isEmpty()) {
707            return;
708        }
709        int size = ids.size();
710        int chunkSize = sqlInfo.getMaximumArgsForIn();
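        // same chunking as in read(): the DELETE uses an IN clause, so large id sets are split into chunks of
        // at most the dialect's maximum number of IN arguments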
711        if (size > chunkSize) {
712            List<Serializable> idList = new ArrayList<Serializable>(ids);
713            for (int start = 0; start < size; start += chunkSize) {
714                int end = start + chunkSize;
715                if (end > size) {
716                    end = size;
717                }
718                // needs to be Serializable -> copy
719                List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
720                deleteRowsDirect(tableName, chunkIds);
721            }
722        } else {
723            deleteRowsDirect(tableName, ids);
724        }
725    }
726
727    protected void deleteRowsSoft(List<NodeInfo> nodeInfos) {
728        try {
729            int size = nodeInfos.size();
730            List<Serializable> ids = new ArrayList<Serializable>(size);
731            for (NodeInfo info : nodeInfos) {
732                ids.add(info.id);
733            }
734            int chunkSize = 100; // max size of ids array
735            if (size <= chunkSize) {
736                doSoftDeleteRows(ids);
737            } else {
738                for (int start = 0; start < size;) {
739                    int end = start + chunkSize;
740                    if (end > size) {
741                        end = size;
742                    }
743                    doSoftDeleteRows(ids.subList(start, end));
744                    start = end;
745                }
746            }
747        } catch (SQLException e) {
748            throw new NuxeoException("Could not soft delete", e);
749        }
750    }
751
752    // not chunked
753    protected void doSoftDeleteRows(List<Serializable> ids) throws SQLException {
754        Serializable whereIds = newIdArray(ids);
755        Calendar now = Calendar.getInstance();
756        String sql = sqlInfo.getSoftDeleteSql();
757        if (logger.isLogEnabled()) {
758            logger.logSQL(sql, Arrays.asList(whereIds, now));
759        }
760        PreparedStatement ps = connection.prepareStatement(sql);
761        try {
762            setToPreparedStatementIdArray(ps, 1, whereIds);
763            dialect.setToPreparedStatementTimestamp(ps, 2, now, null);
764            ps.execute();
765            countExecute();
766            return;
767        } finally {
768            try {
769                closeStatement(ps);
770            } catch (SQLException e) {
771                logger.error(e.getMessage(), e);
772            }
773        }
774    }
775
776    protected Serializable newIdArray(Collection<Serializable> ids) {
777        if (dialect.supportsArrays()) {
778            return ids.toArray(); // Object[]
779        } else {
780            // join with '|'
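            // for dialects without native SQL arrays, the ids are passed as a single '|'-delimited string,
            // which the soft-delete statement or stored procedure is presumably expected to split server-side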
781            StringBuilder b = new StringBuilder();
782            for (Serializable id : ids) {
783                b.append(id);
784                b.append('|');
785            }
786            b.setLength(b.length() - 1);
787            return b.toString();
788        }
789    }
790
791    protected void setToPreparedStatementIdArray(PreparedStatement ps, int index, Serializable idArray)
792            throws SQLException {
793        if (idArray instanceof String) {
794            ps.setString(index, (String) idArray);
795        } else {
796            Array array = dialect.createArrayOf(Types.OTHER, (Object[]) idArray, connection);
797            ps.setArray(index, array);
798        }
799    }
800
    /**
     * Cleans up soft-deleted rows.
     * <p>
     * Rows deleted more recently than {@code beforeTime} are left alone. Only a limited number of rows is deleted, to
     * prevent the transaction from lasting too long.
     *
     * @param max the maximum number of rows to delete at a time
     * @param beforeTime the maximum deletion time of the rows to delete
     * @return the number of rows deleted
     */
811    public int cleanupDeletedRows(int max, Calendar beforeTime) {
812        if (max < 0) {
813            max = 0;
814        }
815        String sql = sqlInfo.getSoftDeleteCleanupSql();
816        if (logger.isLogEnabled()) {
817            logger.logSQL(sql, Arrays.<Serializable> asList(beforeTime, Long.valueOf(max)));
818        }
819        try {
820            if (sql.startsWith("{")) {
821                // callable statement
822                boolean outFirst = sql.startsWith("{?=");
823                int outIndex = outFirst ? 1 : 3;
824                int inIndex = outFirst ? 2 : 1;
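                // two callable forms are handled: "{?= call f(?, ?)}", where the count is returned as the first
                // (out) parameter, and a form where the out parameter comes after the two in parameters (index 3)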
825                CallableStatement cs = connection.prepareCall(sql);
826                try {
827                    cs.setInt(inIndex, max);
828                    dialect.setToPreparedStatementTimestamp(cs, inIndex + 1, beforeTime, null);
829                    cs.registerOutParameter(outIndex, Types.INTEGER);
830                    cs.execute();
831                    int count = cs.getInt(outIndex);
832                    logger.logCount(count);
833                    return count;
834                } finally {
835                    cs.close();
836                }
837            } else {
838                // standard prepared statement with result set
839                PreparedStatement ps = connection.prepareStatement(sql);
840                try {
841                    ps.setInt(1, max);
842                    dialect.setToPreparedStatementTimestamp(ps, 2, beforeTime, null);
843                    ResultSet rs = ps.executeQuery();
844                    countExecute();
845                    if (!rs.next()) {
846                        throw new NuxeoException("Cannot get result");
847                    }
848                    int count = rs.getInt(1);
849                    logger.logCount(count);
850                    return count;
851                } finally {
852                    closeStatement(ps);
853                }
854            }
855        } catch (SQLException e) {
856            throw new NuxeoException("Could not purge soft delete", e);
857        }
858    }
859
860    protected void deleteRowsDirect(String tableName, Collection<Serializable> ids) {
861        try {
862            String sql = sqlInfo.getDeleteSql(tableName, ids.size());
863            if (logger.isLogEnabled()) {
864                logger.logSQL(sql, ids);
865            }
866            PreparedStatement ps = connection.prepareStatement(sql);
867            try {
868                int i = 1;
869                for (Serializable id : ids) {
870                    dialect.setId(ps, i++, id);
871                }
872                int count = ps.executeUpdate();
873                countExecute();
874                logger.logCount(count);
875            } finally {
876                closeStatement(ps);
877            }
878        } catch (SQLException e) {
879            checkConcurrentUpdate(e);
880            throw new NuxeoException("Could not delete: " + tableName, e);
881        }
882    }
883
884    @Override
885    public Row readSimpleRow(RowId rowId) {
886        SQLInfoSelect select = sqlInfo.selectFragmentById.get(rowId.tableName);
887        Map<String, Serializable> criteriaMap = Collections.singletonMap(model.MAIN_KEY, rowId.id);
888        List<Row> maps = getSelectRows(rowId.tableName, select, criteriaMap, null, true);
889        return maps.isEmpty() ? null : maps.get(0);
890    }
891
892    @Override
893    public Map<String, String> getBinaryFulltext(RowId rowId) {
894        ArrayList<String> columns = new ArrayList<String>();
895        for (String index : model.getFulltextConfiguration().indexesAllBinary) {
896            String col = Model.FULLTEXT_BINARYTEXT_KEY + model.getFulltextIndexSuffix(index);
897            columns.add(col);
898        }
899        Serializable id = rowId.id;
900        Map<String, String> ret = new HashMap<String, String>(columns.size());
901        String sql = dialect.getBinaryFulltextSql(columns);
902        if (sql == null) {
903            logger.info("getBinaryFulltextSql not supported for dialect " + dialect);
904            return ret;
905        }
906        if (logger.isLogEnabled()) {
907            logger.logSQL(sql, Collections.singletonList(id));
908        }
909        PreparedStatement ps = null;
910        ResultSet rs = null;
911        try {
912            ps = connection.prepareStatement(sql);
913            try {
914                dialect.setId(ps, 1, id);
915                rs = ps.executeQuery();
916                while (rs.next()) {
917                    for (int i = 1; i <= columns.size(); i++) {
918                        ret.put(columns.get(i - 1), rs.getString(i));
919                    }
920                }
921                if (logger.isLogEnabled()) {
922                    logger.log("  -> " + ret);
923                }
924            } finally {
925                closeStatement(ps, rs);
926            }
927        } catch (SQLException e) {
928            throw new NuxeoException("Could not select: " + sql, e);
929        }
930        return ret;
931    }
932
933    @Override
934    public Serializable[] readCollectionRowArray(RowId rowId) {
935        String tableName = rowId.tableName;
936        Serializable id = rowId.id;
937        String sql = sqlInfo.selectFragmentById.get(tableName).sql;
938        try {
939            // XXX statement should be already prepared
940            if (logger.isLogEnabled()) {
941                logger.logSQL(sql, Collections.singletonList(id));
942            }
943            PreparedStatement ps = connection.prepareStatement(sql);
944            ResultSet rs = null;
945            try {
946                List<Column> columns = sqlInfo.selectFragmentById.get(tableName).whatColumns;
947                dialect.setId(ps, 1, id); // assumes only one primary column
948                rs = ps.executeQuery();
949                countExecute();
950
951                // construct the resulting collection using each row
952                CollectionIO io = getCollectionIO(tableName);
953                List<Serializable> list = new ArrayList<Serializable>();
954                Serializable[] returnId = new Serializable[1];
955                int[] returnPos = { -1 };
956                while (rs.next()) {
957                    list.add(io.getCurrentFromResultSet(rs, columns, model, returnId, returnPos));
958                }
959                PropertyType type = model.getCollectionFragmentType(tableName).getArrayBaseType();
960                Serializable[] array = type.collectionToArray(list);
961
962                if (logger.isLogEnabled()) {
963                    logger.log("  -> " + Arrays.asList(array));
964                }
965                return array;
966            } finally {
967                closeStatement(ps, rs);
968            }
969        } catch (SQLException e) {
970            throw new NuxeoException("Could not select: " + sql, e);
971        }
972    }
973
974    @Override
975    public List<Row> readSelectionRows(SelectionType selType, Serializable selId, Serializable filter,
976            Serializable criterion, boolean limitToOne) {
977        SQLInfoSelection selInfo = sqlInfo.getSelection(selType);
978        Map<String, Serializable> criteriaMap = new HashMap<String, Serializable>();
979        criteriaMap.put(selType.selKey, selId);
980        SQLInfoSelect select;
981        if (filter == null) {
982            select = selInfo.selectAll;
983        } else {
984            select = selInfo.selectFiltered;
985            criteriaMap.put(selType.filterKey, filter);
986        }
987        if (selType.criterionKey != null) {
988            criteriaMap.put(selType.criterionKey, criterion);
989        }
990        return getSelectRows(selType.tableName, select, criteriaMap, null, limitToOne);
991    }
992
993    @Override
994    public CopyResult copy(IdWithTypes source, Serializable destParentId, String destName, Row overwriteRow) {
995        // assert !model.separateMainTable; // other case not implemented
996        Invalidations invalidations = new Invalidations();
997        try {
998            Map<Serializable, Serializable> idMap = new LinkedHashMap<Serializable, Serializable>();
999            Map<Serializable, IdWithTypes> idToTypes = new HashMap<Serializable, IdWithTypes>();
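            // Overall copy algorithm: (1) copy the hierarchy rows recursively, recording the old -> new id
            // mapping and the types of every copied node; (2) for each other fragment table used by those
            // types, bulk-copy the rows with the dialect's copy SQL; (3) record invalidations for anything
            // overwritten so other sessions and cluster nodes see the change.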
1000            // copy the hierarchy fragments recursively
1001            Serializable overwriteId = overwriteRow == null ? null : overwriteRow.id;
1002            if (overwriteId != null) {
1003                // overwrite hier root with explicit values
1004                String tableName = model.HIER_TABLE_NAME;
1005                updateSimpleRowWithValues(tableName, overwriteRow);
1006                idMap.put(source.id, overwriteId);
1007                // invalidate
1008                invalidations.addModified(new RowId(tableName, overwriteId));
1009            }
1010            // create the new hierarchy by copy
1011            boolean resetVersion = destParentId != null;
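            // a null destParentId presumably corresponds to a check-in (copy made to create a version), in
            // which case the version fields are kept instead of being reset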
1012            Serializable newRootId = copyHierRecursive(source, destParentId, destName, overwriteId, resetVersion,
1013                    idMap, idToTypes);
1014            // invalidate children
1015            Serializable invalParentId = overwriteId == null ? destParentId : overwriteId;
1016            if (invalParentId != null) { // null for a new version
1017                invalidations.addModified(new RowId(Invalidations.PARENT, invalParentId));
1018            }
1019            // copy all collected fragments
1020            Set<Serializable> proxyIds = new HashSet<Serializable>();
1021            for (Entry<String, Set<Serializable>> entry : model.getPerFragmentIds(idToTypes).entrySet()) {
1022                String tableName = entry.getKey();
1023                if (tableName.equals(model.HIER_TABLE_NAME)) {
1024                    // already done
1025                    continue;
1026                }
1027                if (tableName.equals(model.VERSION_TABLE_NAME)) {
1028                    // versions not fileable
1029                    // restore must not copy versions either
1030                    continue;
1031                }
1032                Set<Serializable> ids = entry.getValue();
1033                if (tableName.equals(model.PROXY_TABLE_NAME)) {
1034                    for (Serializable id : ids) {
1035                        proxyIds.add(idMap.get(id)); // copied ids
1036                    }
1037                }
1038                Boolean invalidation = copyRows(tableName, ids, idMap, overwriteId);
1039                if (invalidation != null) {
1040                    // overwrote something
1041                    // make sure things are properly invalidated in this and
1042                    // other sessions
1043                    if (Boolean.TRUE.equals(invalidation)) {
1044                        invalidations.addModified(new RowId(tableName, overwriteId));
1045                    } else {
1046                        invalidations.addDeleted(new RowId(tableName, overwriteId));
1047                    }
1048                }
1049            }
1050            return new CopyResult(newRootId, invalidations, proxyIds);
1051        } catch (SQLException e) {
1052            throw new NuxeoException("Could not copy: " + source.id.toString(), e);
1053        }
1054    }
1055
1056    /**
1057     * Updates a row in the database with given explicit values.
1058     */
1059    protected void updateSimpleRowWithValues(String tableName, Row row) {
1060        Update update = sqlInfo.getUpdateByIdForKeys(tableName, row.getKeys());
1061        Table table = update.getTable();
1062        String sql = update.getStatement();
1063        try {
1064            PreparedStatement ps = connection.prepareStatement(sql);
1065            try {
1066                if (logger.isLogEnabled()) {
1067                    List<Serializable> values = new LinkedList<Serializable>();
1068                    values.addAll(row.getValues());
1069                    values.add(row.id); // id last in SQL
1070                    logger.logSQL(sql, values);
1071                }
1072                int i = 1;
1073                List<String> keys = row.getKeys();
1074                List<Serializable> values = row.getValues();
1075                int size = keys.size();
1076                for (int r = 0; r < size; r++) {
1077                    String key = keys.get(r);
1078                    Serializable value = values.get(r);
1079                    table.getColumn(key).setToPreparedStatement(ps, i++, value);
1080                }
1081                dialect.setId(ps, i, row.id); // id last in SQL
1082                int count = ps.executeUpdate();
1083                countExecute();
1084                logger.logCount(count);
1085            } finally {
1086                closeStatement(ps);
1087            }
1088        } catch (SQLException e) {
1089            throw new NuxeoException("Could not update: " + sql, e);
1090        }
1091    }
1092
    /**
     * Copies the hierarchy starting from the source id under parentId, and recurses into children.
     * <p>
     * If name is {@code null}, then the original name is kept.
     * <p>
     * {@code idMap} is filled with the correspondence between original and copied ids. {@code idToTypes} is filled
     * with the types of each (source) fragment.
     * <p>
     * TODO: this should be optimized to use a stored procedure.
     *
     * @param overwriteId when not {@code null}, the copy is done onto this existing node, whose own hierarchy row is
     *            not copied (only its children are)
     * @return the new root id
     */
1106    protected Serializable copyHierRecursive(IdWithTypes source, Serializable parentId, String name,
1107            Serializable overwriteId, boolean resetVersion, Map<Serializable, Serializable> idMap,
1108            Map<Serializable, IdWithTypes> idToTypes) throws SQLException {
1109        idToTypes.put(source.id, source);
1110        Serializable newId;
1111        if (overwriteId == null) {
1112            newId = copyHier(source.id, parentId, name, resetVersion, idMap);
1113        } else {
1114            newId = overwriteId;
1115            idMap.put(source.id, newId);
1116        }
1117        // recurse in children
1118        boolean onlyComplex = parentId == null;
1119        for (IdWithTypes child : getChildrenIdsWithTypes(source.id, onlyComplex)) {
1120            copyHierRecursive(child, newId, null, null, resetVersion, idMap, idToTypes);
1121        }
1122        return newId;
1123    }
1124
    /**
     * Copies the hierarchy row with the given id to a new child of parentId.
     * <p>
     * If name is {@code null}, then the original name is kept.
     * <p>
     * {@code idMap} is filled with the correspondence between the original and the copied id.
     *
     * @return the new id
     */
1135    protected Serializable copyHier(Serializable id, Serializable parentId, String name, boolean resetVersion,
1136            Map<Serializable, Serializable> idMap) throws SQLException {
1137        boolean explicitName = name != null;
1138
1139        SQLInfoSelect copy = sqlInfo.getCopyHier(explicitName, resetVersion);
1140        PreparedStatement ps = connection.prepareStatement(copy.sql);
1141        try {
1142            Serializable newId = generateNewId();
1143
1144            List<Serializable> debugValues = null;
1145            if (logger.isLogEnabled()) {
1146                debugValues = new ArrayList<Serializable>(4);
1147            }
1148            int i = 1;
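            // the copy statement is presumably an INSERT ... SELECT on the hierarchy table: only the columns in
            // whatColumns get explicit values (parent, optional name, optional new id, version fields to reset),
            // everything else is copied from the source row selected by the trailing WHERE "id" = ?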
1149            for (Column column : copy.whatColumns) {
1150                String key = column.getKey();
1151                Serializable v;
1152                if (key.equals(model.HIER_PARENT_KEY)) {
1153                    v = parentId;
1154                } else if (key.equals(model.HIER_CHILD_NAME_KEY)) {
                    // present if name explicitly set (first iteration)
1156                    v = name;
1157                } else if (key.equals(model.MAIN_KEY)) {
1158                    // present if APP_UUID generation
1159                    v = newId;
1160                } else if (key.equals(model.MAIN_BASE_VERSION_KEY) || key.equals(model.MAIN_CHECKED_IN_KEY)) {
1161                    v = null;
1162                } else if (key.equals(model.MAIN_MINOR_VERSION_KEY) || key.equals(model.MAIN_MAJOR_VERSION_KEY)) {
1163                    // present if reset version (regular copy, not checkin)
1164                    v = null;
1165                } else {
1166                    throw new RuntimeException(column.toString());
1167                }
1168                column.setToPreparedStatement(ps, i++, v);
1169                if (debugValues != null) {
1170                    debugValues.add(v);
1171                }
1172            }
1173            // last parameter is for 'WHERE "id" = ?'
1174            Column whereColumn = copy.whereColumns.get(0);
1175            whereColumn.setToPreparedStatement(ps, i, id);
1176            if (debugValues != null) {
1177                debugValues.add(id);
1178                logger.logSQL(copy.sql, debugValues);
1179            }
1180            int count = ps.executeUpdate();
1181            countExecute();
1182            logger.logCount(count);
1183
1184            // TODO DB_IDENTITY
1185            // post insert fetch idrow
1186
1187            idMap.put(id, newId);
1188            return newId;
1189        } finally {
1190            try {
1191                closeStatement(ps);
1192            } catch (SQLException e) {
1193                logger.error(e.getMessage(), e);
1194            }
1195        }
1196    }
1197
1198    /**
1199     * Gets the children ids and types of a node.
1200     */
1201    protected List<IdWithTypes> getChildrenIdsWithTypes(Serializable id, boolean onlyComplex) throws SQLException {
1202        List<IdWithTypes> children = new LinkedList<IdWithTypes>();
1203        String sql = sqlInfo.getSelectChildrenIdsAndTypesSql(onlyComplex);
1204        if (logger.isLogEnabled()) {
1205            logger.logSQL(sql, Collections.singletonList(id));
1206        }
1207        List<Column> columns = sqlInfo.getSelectChildrenIdsAndTypesWhatColumns();
1208        PreparedStatement ps = connection.prepareStatement(sql);
1209        ResultSet rs = null;
1210        try {
1211            List<String> debugValues = null;
1212            if (logger.isLogEnabled()) {
1213                debugValues = new LinkedList<String>();
1214            }
1215            dialect.setId(ps, 1, id); // parent id
1216            rs = ps.executeQuery();
1217            countExecute();
1218            while (rs.next()) {
1219                Serializable childId = null;
1220                String childPrimaryType = null;
1221                String[] childMixinTypes = null;
1222                int i = 1;
1223                for (Column column : columns) {
1224                    String key = column.getKey();
1225                    Serializable value = column.getFromResultSet(rs, i++);
1226                    if (key.equals(model.MAIN_KEY)) {
1227                        childId = value;
1228                    } else if (key.equals(model.MAIN_PRIMARY_TYPE_KEY)) {
1229                        childPrimaryType = (String) value;
1230                    } else if (key.equals(model.MAIN_MIXIN_TYPES_KEY)) {
1231                        childMixinTypes = (String[]) value;
1232                    }
1233                }
1234                children.add(new IdWithTypes(childId, childPrimaryType, childMixinTypes));
1235                if (debugValues != null) {
1236                    debugValues.add(childId + "/" + childPrimaryType + "/" + Arrays.toString(childMixinTypes));
1237                }
1238            }
1239            if (debugValues != null) {
1240                logger.log("  -> " + debugValues);
1241            }
1242            return children;
1243        } finally {
1244            try {
1245                closeStatement(ps);
1246            } catch (SQLException e) {
1247                logger.error(e.getMessage(), e);
1248            }
1249        }
1250    }
1251
    /**
     * Copies the rows from tableName with the given ids into new ones with the new ids given by idMap.
     * <p>
     * Any existing row with id {@code overwriteId} is first deleted.
     *
     * @return {@link Boolean#TRUE} for a modification or creation, {@link Boolean#FALSE} for a deletion, {@code null}
     *         otherwise (still absent)
     * @throws SQLException
     */
1261    protected Boolean copyRows(String tableName, Set<Serializable> ids, Map<Serializable, Serializable> idMap,
1262            Serializable overwriteId) throws SQLException {
1263        String copySql = sqlInfo.getCopySql(tableName);
1264        Column copyIdColumn = sqlInfo.getCopyIdColumn(tableName);
1265        PreparedStatement copyPs = connection.prepareStatement(copySql);
1266        String deleteSql = sqlInfo.getDeleteSql(tableName);
1267        PreparedStatement deletePs = connection.prepareStatement(deleteSql);
1268        try {
1269            boolean before = false;
1270            boolean after = false;
1271            for (Serializable id : ids) {
1272                Serializable newId = idMap.get(id);
1273                boolean overwrite = newId.equals(overwriteId);
1274                if (overwrite) {
1275                    // remove existing first
1276                    if (logger.isLogEnabled()) {
1277                        logger.logSQL(deleteSql, Collections.singletonList(newId));
1278                    }
1279                    dialect.setId(deletePs, 1, newId);
1280                    int delCount = deletePs.executeUpdate();
1281                    countExecute();
1282                    logger.logCount(delCount);
1283                    before = delCount > 0;
1284                }
1285                copyIdColumn.setToPreparedStatement(copyPs, 1, newId);
1286                copyIdColumn.setToPreparedStatement(copyPs, 2, id);
1287                if (logger.isLogEnabled()) {
1288                    logger.logSQL(copySql, Arrays.asList(newId, id));
1289                }
1290                int copyCount = copyPs.executeUpdate();
1291                countExecute();
1292                logger.logCount(copyCount);
1293                if (overwrite) {
1294                    after = copyCount > 0;
1295                }
1296            }
            // (before, after) -> result:
            // (*, copied)     -> modification or creation (TRUE)
            // (deleted, none) -> deletion (FALSE)
            // (none, none)    -> still absent (null)
1300            return after ? Boolean.TRUE : (before ? Boolean.FALSE : null);
1301        } finally {
1302            try {
1303                closeStatement(copyPs);
1304                closeStatement(deletePs);
1305            } catch (SQLException e) {
1306                logger.error(e.getMessage(), e);
1307            }
1308        }
1309    }
1310
1311    @Override
1312    public List<NodeInfo> remove(NodeInfo rootInfo) {
1313        Serializable rootId = rootInfo.id;
1314        List<NodeInfo> info = getDescendantsInfo(rootId);
1315        info.add(rootInfo);
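        // Soft delete: mark all the rows as deleted; they can be purged later by a separate cleanup.
        // Hard delete: only the root row is deleted explicitly; descendant rows are expected to be removed by
        // the database through ON DELETE CASCADE constraints on the hierarchy table.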
        if (sqlInfo.softDeleteEnabled) {
            deleteRowsSoft(info);
        } else {
            deleteRowsDirect(model.HIER_TABLE_NAME, Collections.singleton(rootId));
        }
        return info;
    }

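    /**
     * Gets the info of all the descendants of a given node.
     * <p>
     * Uses a single descendants query when the dialect supports it, otherwise falls back to the iterative,
     * level-by-level traversal of {@link #getDescendantsInfoIterative}.
     */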
    protected List<NodeInfo> getDescendantsInfo(Serializable rootId) {
        if (!dialect.supportsFastDescendants()) {
            return getDescendantsInfoIterative(rootId);
        }
        List<NodeInfo> descendants = new LinkedList<>();
        String sql = sqlInfo.getSelectDescendantsInfoSql();
        if (logger.isLogEnabled()) {
            logger.logSQL(sql, Collections.singletonList(rootId));
        }
        List<Column> columns = sqlInfo.getSelectDescendantsInfoWhatColumns();
        PreparedStatement ps = null;
        try {
            ps = connection.prepareStatement(sql);
            List<String> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new LinkedList<>();
            }
            dialect.setId(ps, 1, rootId); // parent id
            ResultSet rs = ps.executeQuery();
            countExecute();
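            // Each result row is turned into a NodeInfo; the proxy target and versionable ids are only
            // non-null for proxy rows.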
            while (rs.next()) {
                Serializable id = null;
                Serializable parentId = null;
                String primaryType = null;
                Boolean isProperty = null;
                Serializable targetId = null;
                Serializable versionableId = null;
                int i = 1;
                for (Column column : columns) {
                    String key = column.getKey();
                    Serializable value = column.getFromResultSet(rs, i++);
                    if (key.equals(model.MAIN_KEY)) {
                        id = value;
                    } else if (key.equals(model.HIER_PARENT_KEY)) {
                        parentId = value;
                    } else if (key.equals(model.MAIN_PRIMARY_TYPE_KEY)) {
                        primaryType = (String) value;
                    } else if (key.equals(model.HIER_CHILD_ISPROPERTY_KEY)) {
                        isProperty = (Boolean) value;
                    } else if (key.equals(model.PROXY_TARGET_KEY)) {
                        targetId = value;
                    } else if (key.equals(model.PROXY_VERSIONABLE_KEY)) {
                        versionableId = value;
                    }
                    // no mixins (not useful to caller)
                    // no versions (not fileable)
                }
                descendants.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
                if (debugValues != null && debugValues.size() < DEBUG_MAX_TREE) {
                    debugValues.add(id + "/" + primaryType);
                }
            }
            if (debugValues != null) {
                if (debugValues.size() >= DEBUG_MAX_TREE) {
                    debugValues.add("... (" + descendants.size() + ") results");
                }
                logger.log("  -> " + debugValues);
            }
            return descendants;
        } catch (SQLException e) {
            throw new NuxeoException("Failed to get descendants", e);
        } finally {
            try {
                closeStatement(ps);
            } catch (SQLException e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

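    /**
     * Iterative (breadth-first) variant of {@link #getDescendantsInfo}, used when the dialect cannot fetch all
     * descendants in a single query. Children are retrieved level by level, in chunks bounded by the maximum
     * number of arguments the dialect allows in an IN clause.
     */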
    protected List<NodeInfo> getDescendantsInfoIterative(Serializable rootId) {
        Set<Serializable> done = new HashSet<>();
        List<Serializable> todo = new ArrayList<>(Collections.singleton(rootId));
        List<NodeInfo> descendants = new ArrayList<>();
        while (!todo.isEmpty()) {
            List<NodeInfo> infos;
            int size = todo.size();
            int chunkSize = sqlInfo.getMaximumArgsForIn();
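            // Split the current level into chunks so that each query's IN clause stays within the dialect's
            // maximum number of allowed arguments.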
            if (size > chunkSize) {
                infos = new ArrayList<>();
                for (int start = 0; start < size; start += chunkSize) {
                    int end = Math.min(start + chunkSize, size);
                    // needs to be Serializable -> copy
                    List<Serializable> chunkTodo = new ArrayList<>(todo.subList(start, end));
                    List<NodeInfo> chunkInfos = getChildrenNodeInfos(chunkTodo);
                    infos.addAll(chunkInfos);
                }
            } else {
                infos = getChildrenNodeInfos(todo);
            }
            todo = new ArrayList<>();
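            // Collect the next level: skip ids already seen, which also protects against duplicates or
            // cycles in the fetched hierarchy.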
            for (NodeInfo info : infos) {
                Serializable id = info.id;
                if (!done.add(id)) {
                    continue;
                }
                todo.add(id);
                descendants.add(info);
            }
        }
        return descendants;
    }

    /**
     * Gets the direct children of the given nodes as a list of {@link NodeInfo}.
     */
    protected List<NodeInfo> getChildrenNodeInfos(Collection<Serializable> ids) {
        List<NodeInfo> children = new LinkedList<>();
        SQLInfoSelect select = sqlInfo.getSelectChildrenNodeInfos(ids.size());
        if (logger.isLogEnabled()) {
            logger.logSQL(select.sql, ids);
        }
        Column where = select.whereColumns.get(0);
        PreparedStatement ps = null;
        ResultSet rs = null;
        try {
            ps = connection.prepareStatement(select.sql);
            List<String> debugValues = null;
            if (logger.isLogEnabled()) {
                debugValues = new LinkedList<>();
            }
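            // Bind each parent id to one placeholder of the children query (one placeholder per id).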
            int ii = 1;
            for (Serializable id : ids) {
                where.setToPreparedStatement(ps, ii++, id);
            }
            rs = ps.executeQuery();
            countExecute();
            while (rs.next()) {
                Serializable id = null;
                Serializable parentId = null;
                String primaryType = null;
                Boolean isProperty = Boolean.FALSE;
                Serializable targetId = null;
                Serializable versionableId = null;
                int i = 1;
                for (Column column : select.whatColumns) {
                    String key = column.getKey();
                    Serializable value = column.getFromResultSet(rs, i++);
                    if (key.equals(model.MAIN_KEY)) {
                        id = value;
                    } else if (key.equals(model.HIER_PARENT_KEY)) {
                        parentId = value;
                    } else if (key.equals(model.MAIN_PRIMARY_TYPE_KEY)) {
                        primaryType = (String) value;
                    } else if (key.equals(model.PROXY_TARGET_KEY)) {
                        targetId = value;
                    } else if (key.equals(model.PROXY_VERSIONABLE_KEY)) {
                        versionableId = value;
                    }
                }
                children.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
                if (debugValues != null && debugValues.size() < DEBUG_MAX_TREE) {
                    debugValues.add(id + "/" + primaryType);
                }
            }
            if (debugValues != null) {
                if (debugValues.size() >= DEBUG_MAX_TREE) {
                    debugValues.add("... (" + children.size() + ") results");
                }
                logger.log("  -> " + debugValues);
            }
            return children;
        } catch (SQLException e) {
            throw new NuxeoException("Failed to get children node info", e);
        } finally {
            try {
                closeStatement(ps, rs);
            } catch (SQLException e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

}