001/*
002 * (C) Copyright 2006-2011 Nuxeo SA (http://nuxeo.com/) and others.
003 *
004 * Licensed under the Apache License, Version 2.0 (the "License");
005 * you may not use this file except in compliance with the License.
006 * You may obtain a copy of the License at
007 *
008 *     http://www.apache.org/licenses/LICENSE-2.0
009 *
010 * Unless required by applicable law or agreed to in writing, software
011 * distributed under the License is distributed on an "AS IS" BASIS,
012 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
013 * See the License for the specific language governing permissions and
014 * limitations under the License.
015 *
016 * Contributors:
017 *     Florent Guillaume
018 */
019package org.nuxeo.ecm.core.storage.sql.jdbc;
020
021import java.io.Serializable;
022import java.sql.Array;
023import java.sql.BatchUpdateException;
024import java.sql.CallableStatement;
025import java.sql.PreparedStatement;
026import java.sql.ResultSet;
027import java.sql.SQLException;
028import java.sql.Types;
029import java.util.ArrayList;
030import java.util.Arrays;
031import java.util.Calendar;
032import java.util.Collection;
033import java.util.Collections;
034import java.util.HashMap;
035import java.util.HashSet;
036import java.util.LinkedHashMap;
037import java.util.LinkedList;
038import java.util.List;
039import java.util.ListIterator;
040import java.util.Map;
041import java.util.Map.Entry;
042import java.util.Set;
043
044import javax.sql.XADataSource;
045import javax.transaction.xa.XAException;
046import javax.transaction.xa.Xid;
047
048import org.apache.commons.lang.StringUtils;
049import org.nuxeo.ecm.core.api.NuxeoException;
050import org.nuxeo.ecm.core.api.model.Delta;
051import org.nuxeo.ecm.core.storage.sql.ClusterInvalidator;
052import org.nuxeo.ecm.core.query.QueryFilter;
053import org.nuxeo.ecm.core.storage.sql.Invalidations;
054import org.nuxeo.ecm.core.storage.sql.InvalidationsPropagator;
055import org.nuxeo.ecm.core.storage.sql.RowMapper.NodeInfo;
056import org.nuxeo.ecm.core.storage.sql.InvalidationsQueue;
057import org.nuxeo.ecm.core.storage.sql.Mapper;
058import org.nuxeo.ecm.core.storage.sql.Model;
059import org.nuxeo.ecm.core.storage.sql.PropertyType;
060import org.nuxeo.ecm.core.storage.sql.Row;
061import org.nuxeo.ecm.core.storage.sql.RowId;
062import org.nuxeo.ecm.core.storage.sql.RowMapper;
063import org.nuxeo.ecm.core.storage.sql.SelectionType;
064import org.nuxeo.ecm.core.storage.sql.SimpleFragment;
065import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelect;
066import org.nuxeo.ecm.core.storage.sql.jdbc.SQLInfo.SQLInfoSelection;
067import org.nuxeo.ecm.core.storage.sql.jdbc.db.Column;
068import org.nuxeo.ecm.core.storage.sql.jdbc.db.Table;
069import org.nuxeo.ecm.core.storage.sql.jdbc.db.Update;
070
071/**
072 * A {@link JDBCRowMapper} maps {@link Row}s to and from a JDBC database.
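 * <p>
 * Row inserts and updates are batched (in groups of {@link #UPDATE_BATCH_SIZE}) when the JDBC driver supports batch
 * updates.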
073 */
074public class JDBCRowMapper extends JDBCConnection implements RowMapper {
075
076    public static final int UPDATE_BATCH_SIZE = 100; // also insert/delete
077
078    public static final int DEBUG_MAX_TREE = 50;
079
080    /**
081     * Cluster invalidator, or {@code null} if this mapper does not participate in invalidation propagation (e.g. the
082     * mappers used by the cluster invalidator itself or by the lock manager).
083     */
084    private final ClusterInvalidator clusterInvalidator;
085
086    private final InvalidationsPropagator invalidationsPropagator;
087
088    public JDBCRowMapper(Model model, SQLInfo sqlInfo, XADataSource xadatasource, ClusterInvalidator clusterInvalidator,
089            InvalidationsPropagator invalidationsPropagator, boolean noSharing) {
090        super(model, sqlInfo, xadatasource, noSharing);
091        this.clusterInvalidator = clusterInvalidator;
092        this.invalidationsPropagator = invalidationsPropagator;
093    }
094
095    @Override
096    public Invalidations receiveInvalidations() {
097        if (clusterInvalidator != null) {
098            Invalidations invalidations = clusterInvalidator.receiveInvalidations();
099            // send received invalidations to all mappers
100            if (invalidations != null && !invalidations.isEmpty()) {
101                invalidationsPropagator.propagateInvalidations(invalidations, null);
102            }
103            return invalidations;
104        } else {
105            return null;
106        }
107    }
108
110    @Override
111    public void sendInvalidations(Invalidations invalidations) {
112        if (clusterInvalidator != null) {
113            clusterInvalidator.sendInvalidations(invalidations);
114        }
115    }
116
117    @Override
118    public void clearCache() {
119        // no cache
120    }
121
122    @Override
123    public long getCacheSize() {
124        return 0;
125    }
126
127    @Override
128    public void rollback(Xid xid) throws XAException {
129        try {
130            xaresource.rollback(xid);
131        } catch (XAException e) {
132            logger.error("XA error on rollback: " + e);
133            throw e;
134        }
135    }
136
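    /**
     * Gets the {@link CollectionIO} strategy to use for a collection table: the ACL table uses
     * {@link ACLCollectionIO}, all other collection tables use {@link ScalarCollectionIO}.
     */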
137    protected CollectionIO getCollectionIO(String tableName) {
138        return tableName.equals(model.ACL_TABLE_NAME) ? ACLCollectionIO.INSTANCE : ScalarCollectionIO.INSTANCE;
139    }
140
141    @Override
142    public Serializable generateNewId() {
143        try {
144            return dialect.getGeneratedId(connection);
145        } catch (SQLException e) {
146            throw new NuxeoException(e);
147        }
148    }
149
150    /*
151     * ----- RowIO -----
152     */
153
154    @Override
155    public List<? extends RowId> read(Collection<RowId> rowIds, boolean cacheOnly) {
156        List<RowId> res = new ArrayList<RowId>(rowIds.size());
157        if (cacheOnly) {
158            // return no data
159            for (RowId rowId : rowIds) {
160                res.add(new RowId(rowId));
161            }
162            return res;
163        }
164        // reorganize by table
165        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
166        for (RowId rowId : rowIds) {
167            Set<Serializable> ids = tableIds.get(rowId.tableName);
168            if (ids == null) {
169                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
170            }
171            ids.add(rowId.id);
172        }
173        // read on each table
174        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
175            String tableName = en.getKey();
176            Set<Serializable> ids = new HashSet<Serializable>(en.getValue());
177            int size = ids.size();
178            int chunkSize = sqlInfo.getMaximumArgsForIn();
179            List<Row> rows;
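                // too many ids for a single IN clause: query in chunks of at most chunkSize ids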
180            if (size > chunkSize) {
181                List<Serializable> idList = new ArrayList<Serializable>(ids);
182                rows = new ArrayList<Row>(size);
183                for (int start = 0; start < size; start += chunkSize) {
184                    int end = start + chunkSize;
185                    if (end > size) {
186                        end = size;
187                    }
188                    // needs to be Serializable -> copy
189                    List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
190                    List<Row> chunkRows;
191                    if (model.isCollectionFragment(tableName)) {
192                        chunkRows = readCollectionArrays(tableName, chunkIds);
193                    } else {
194                        chunkRows = readSimpleRows(tableName, chunkIds);
195                    }
196                    rows.addAll(chunkRows);
197                }
198            } else {
199                if (model.isCollectionFragment(tableName)) {
200                    rows = readCollectionArrays(tableName, ids);
201                } else {
202                    rows = readSimpleRows(tableName, ids);
203                }
204            }
205            // check we have all the ids (readSimpleRows may have some
206            // missing)
207            for (Row row : rows) {
208                res.add(row);
209                ids.remove(row.id);
210            }
211            // for the missing ids record an empty RowId
212            for (Serializable id : ids) {
213                res.add(new RowId(tableName, id));
214            }
215        }
216        return res;
217    }
218
219    /**
220     * Gets a list of rows for {@link SimpleFragment}s from the database, given the table name and the ids.
221     *
222     * @param tableName the table name
223     * @param ids the ids
224     * @return the list of rows, without the missing ones
225     */
226    protected List<Row> readSimpleRows(String tableName, Collection<Serializable> ids) {
227        if (ids.isEmpty()) {
228            return Collections.emptyList();
229        }
230        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size());
231        Map<String, Serializable> criteriaMap = Collections.singletonMap(model.MAIN_KEY, (Serializable) ids);
232        return getSelectRows(tableName, select, criteriaMap, null, false);
233    }
234
235    /**
236     * Reads several collection rows, given a table name and the ids.
237     *
238     * @param tableName the table name
239     * @param ids the ids
240     */
241    protected List<Row> readCollectionArrays(String tableName, Collection<Serializable> ids) {
242        if (ids.isEmpty()) {
243            return Collections.emptyList();
244        }
245        String[] orderBys = { model.MAIN_KEY, model.COLL_TABLE_POS_KEY }; // clusters
246                                                                          // results
247        Set<String> skipColumns = new HashSet<String>(Arrays.asList(model.COLL_TABLE_POS_KEY));
248        SQLInfoSelect select = sqlInfo.getSelectFragmentsByIds(tableName, ids.size(), orderBys, skipColumns);
249
250        String sql = select.sql;
251        try {
252            if (logger.isLogEnabled()) {
253                logger.logSQL(sql, ids);
254            }
255            PreparedStatement ps = connection.prepareStatement(sql);
256            ResultSet rs = null;
257            try {
258                int i = 1;
259                for (Serializable id : ids) {
260                    dialect.setId(ps, i++, id);
261                }
262                rs = ps.executeQuery();
263                countExecute();
264
265                // get all values from result set, separate by ids
266                // the result set is ordered by id, pos
267                CollectionIO io = getCollectionIO(tableName);
268                PropertyType ftype = model.getCollectionFragmentType(tableName);
269                PropertyType type = ftype.getArrayBaseType();
270                Serializable curId = null;
271                List<Serializable> list = null;
272                Serializable[] returnId = new Serializable[1];
273                int[] returnPos = { -1 };
274                List<Row> res = new LinkedList<Row>();
275                Set<Serializable> remainingIds = new HashSet<Serializable>(ids);
276                while (rs.next()) {
277                    Serializable value = io.getCurrentFromResultSet(rs, select.whatColumns, model, returnId, returnPos);
278                    Serializable newId = returnId[0];
279                    if (newId != null && !newId.equals(curId)) {
280                        // flush old list
281                        if (list != null) {
282                            res.add(new Row(tableName, curId, type.collectionToArray(list)));
283                            remainingIds.remove(curId);
284                        }
285                        curId = newId;
286                        list = new ArrayList<Serializable>();
287                    }
288                    list.add(value);
289                }
290                if (curId != null && list != null) {
291                    // flush last list
292                    res.add(new Row(tableName, curId, type.collectionToArray(list)));
293                    remainingIds.remove(curId);
294                }
295
296                // fill empty ones
297                if (!remainingIds.isEmpty()) {
298                    Serializable[] emptyArray = ftype.getEmptyArray();
299                    for (Serializable id : remainingIds) {
300                        res.add(new Row(tableName, id, emptyArray));
301                    }
302                }
303                if (logger.isLogEnabled()) {
304                    for (Row row : res) {
305                        logger.log("  -> " + row);
306                    }
307                }
308                return res;
309            } finally {
310                closeStatement(ps, rs);
311            }
312        } catch (SQLException e) {
313            throw new NuxeoException("Could not select: " + sql, e);
314        }
315    }
316
317    /**
318     * Fetches the rows for a select with fixed criteria given as two maps (a criteriaMap whose values end up in the
319     * returned rows, and a joinMap for other criteria).
320     */
321    protected List<Row> getSelectRows(String tableName, SQLInfoSelect select, Map<String, Serializable> criteriaMap,
322            Map<String, Serializable> joinMap, boolean limitToOne) {
323        List<Row> list = new LinkedList<Row>();
324        if (select.whatColumns.isEmpty()) {
325            // happens when we fetch a fragment whose columns are all opaque
326            // check it's a by-id query
327            if (select.whereColumns.size() == 1 && select.whereColumns.get(0).getKey() == model.MAIN_KEY
328                    && joinMap == null) {
329                Row row = new Row(tableName, criteriaMap);
330                if (select.opaqueColumns != null) {
331                    for (Column column : select.opaqueColumns) {
332                        row.putNew(column.getKey(), Row.OPAQUE);
333                    }
334                }
335                list.add(row);
336                return list;
337            }
338            // else do a useless select but the criteria are more complex and we
339            // can't shortcut
340        }
341        if (joinMap == null) {
342            joinMap = Collections.emptyMap();
343        }
344        PreparedStatement ps = null;
345        ResultSet rs = null;
346        try {
347            ps = connection.prepareStatement(select.sql);
348
349            /*
350             * Compute where part.
351             */
352            List<Serializable> debugValues = null;
353            if (logger.isLogEnabled()) {
354                debugValues = new LinkedList<Serializable>();
355            }
356            int i = 1;
357            for (Column column : select.whereColumns) {
358                String key = column.getKey();
359                Serializable v;
360                if (criteriaMap.containsKey(key)) {
361                    v = criteriaMap.get(key);
362                } else if (joinMap.containsKey(key)) {
363                    v = joinMap.get(key);
364                } else {
365                    throw new RuntimeException(key);
366                }
367                if (v == null) {
368                    throw new NuxeoException("Null value for key: " + key);
369                }
370                if (v instanceof Collection<?>) {
371                    // allow insert of several values, for the IN (...) case
372                    for (Object vv : (Collection<?>) v) {
373                        column.setToPreparedStatement(ps, i++, (Serializable) vv);
374                        if (debugValues != null) {
375                            debugValues.add((Serializable) vv);
376                        }
377                    }
378                } else {
379                    column.setToPreparedStatement(ps, i++, v);
380                    if (debugValues != null) {
381                        debugValues.add(v);
382                    }
383                }
384            }
385            if (debugValues != null) {
386                logger.logSQL(select.sql, debugValues);
387            }
388
389            /*
390             * Execute query.
391             */
392            rs = ps.executeQuery();
393            countExecute();
394
395            /*
396             * Construct the maps from the result set.
397             */
398            while (rs.next()) {
399                Row row = new Row(tableName, criteriaMap);
400                i = 1;
401                for (Column column : select.whatColumns) {
402                    row.put(column.getKey(), column.getFromResultSet(rs, i++));
403                }
404                if (select.opaqueColumns != null) {
405                    for (Column column : select.opaqueColumns) {
406                        row.putNew(column.getKey(), Row.OPAQUE);
407                    }
408                }
409                if (logger.isLogEnabled()) {
410                    logger.logResultSet(rs, select.whatColumns);
411                }
412                list.add(row);
413                if (limitToOne) {
414                    return list;
415                }
416            }
417            if (limitToOne) {
418                return Collections.emptyList();
419            }
420            return list;
421        } catch (SQLException e) {
422            checkConcurrentUpdate(e);
423            throw new NuxeoException("Could not select: " + select.sql, e);
424        } finally {
425            try {
426                closeStatement(ps, rs);
427            } catch (SQLException e) {
428                logger.error(e.getMessage(), e);
429            }
430        }
431    }
432
433    @Override
434    public void write(RowBatch batch) {
435        if (!batch.creates.isEmpty()) {
436            writeCreates(batch.creates);
437        }
438        if (!batch.updates.isEmpty()) {
439            writeUpdates(batch.updates);
440        }
441        if (!batch.deletes.isEmpty()) {
442            writeDeletes(batch.deletes);
443        }
444        // batch.deletesDependent not executed
445    }
446
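    /**
     * Inserts the created rows, grouped by table; the hierarchy table is processed first because other tables have
     * foreign keys pointing to it.
     */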
447    protected void writeCreates(List<Row> creates) {
448        // reorganize by table
449        Map<String, List<Row>> tableRows = new LinkedHashMap<String, List<Row>>();
450        // hierarchy table first because there are foreign keys to it
451        tableRows.put(model.HIER_TABLE_NAME, new LinkedList<Row>());
452        for (Row row : creates) {
453            List<Row> rows = tableRows.get(row.tableName);
454            if (rows == null) {
455                tableRows.put(row.tableName, rows = new LinkedList<Row>());
456            }
457            rows.add(row);
458        }
459        // inserts on each table
460        for (Entry<String, List<Row>> en : tableRows.entrySet()) {
461            String tableName = en.getKey();
462            List<Row> rows = en.getValue();
463            if (model.isCollectionFragment(tableName)) {
464                insertCollectionRows(tableName, rows);
465            } else {
466                insertSimpleRows(tableName, rows);
467            }
468        }
469    }
470
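    /**
     * Applies the row updates, grouped by table, dispatching to collection or simple-row update logic as appropriate.
     */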
471    protected void writeUpdates(Set<RowUpdate> updates) {
472        // reorganize by table
473        Map<String, List<RowUpdate>> tableRows = new HashMap<String, List<RowUpdate>>();
474        for (RowUpdate rowu : updates) {
475            List<RowUpdate> rows = tableRows.get(rowu.row.tableName);
476            if (rows == null) {
477                tableRows.put(rowu.row.tableName, rows = new LinkedList<RowUpdate>());
478            }
479            rows.add(rowu);
480        }
481        // updates on each table
482        for (Entry<String, List<RowUpdate>> en : tableRows.entrySet()) {
483            String tableName = en.getKey();
484            List<RowUpdate> rows = en.getValue();
485            if (model.isCollectionFragment(tableName)) {
486                updateCollectionRows(tableName, rows);
487            } else {
488                updateSimpleRows(tableName, rows);
489            }
490        }
491    }
492
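    /**
     * Deletes the given rows, grouped by table.
     */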
493    protected void writeDeletes(Collection<RowId> deletes) {
494        // reorganize by table
495        Map<String, Set<Serializable>> tableIds = new HashMap<String, Set<Serializable>>();
496        for (RowId rowId : deletes) {
497            Set<Serializable> ids = tableIds.get(rowId.tableName);
498            if (ids == null) {
499                tableIds.put(rowId.tableName, ids = new HashSet<Serializable>());
500            }
501            ids.add(rowId.id);
502        }
503        // delete on each table
504        for (Entry<String, Set<Serializable>> en : tableIds.entrySet()) {
505            String tableName = en.getKey();
506            Set<Serializable> ids = en.getValue();
507            deleteRows(tableName, ids);
508        }
509    }
510
511    /**
512     * Inserts multiple rows, all for the same table.
513     */
514    protected void insertSimpleRows(String tableName, List<Row> rows) {
515        if (rows.isEmpty()) {
516            return;
517        }
518        String sql = sqlInfo.getInsertSql(tableName);
519        if (sql == null) {
520            throw new NuxeoException("Unknown table: " + tableName);
521        }
522        String loggedSql = supportsBatchUpdates && rows.size() > 1 ? sql + " -- BATCHED" : sql;
523        List<Column> columns = sqlInfo.getInsertColumns(tableName);
524        try {
525            PreparedStatement ps = connection.prepareStatement(sql);
526            try {
527                int batch = 0;
528                for (Row row : rows) {
529                    batch++;
530                    if (logger.isLogEnabled()) {
531                        logger.logSQL(loggedSql, columns, row);
532                    }
533                    int i = 1;
534                    for (Column column : columns) {
535                        column.setToPreparedStatement(ps, i++, row.get(column.getKey()));
536                    }
537                    if (supportsBatchUpdates) {
538                        ps.addBatch();
539                        if (batch % UPDATE_BATCH_SIZE == 0) {
540                            ps.executeBatch();
541                            countExecute();
542                        }
543                    } else {
544                        ps.execute();
545                        countExecute();
546                    }
547                }
548                if (supportsBatchUpdates) {
549                    ps.executeBatch();
550                    countExecute();
551                }
552            } finally {
553                closeStatement(ps);
554            }
555        } catch (SQLException e) {
556            if (e instanceof BatchUpdateException) {
557                BatchUpdateException bue = (BatchUpdateException) e;
558                if (e.getCause() == null && bue.getNextException() != null) {
559                    // provide a readable cause in the stack trace
560                    e.initCause(bue.getNextException());
561                }
562            }
563            checkConcurrentUpdate(e);
564            throw new NuxeoException("Could not insert: " + sql, e);
565        }
566    }
567
568    /**
569     * Inserts multiple collection rows, all for the same table.
570     */
571    protected void insertCollectionRows(String tableName, List<Row> rows) {
572        if (rows.isEmpty()) {
573            return;
574        }
575        String sql = sqlInfo.getInsertSql(tableName);
576        List<Column> columns = sqlInfo.getInsertColumns(tableName);
577        CollectionIO io = getCollectionIO(tableName);
578        try {
579            PreparedStatement ps = connection.prepareStatement(sql);
580            try {
581                io.executeInserts(ps, rows, columns, supportsBatchUpdates, sql, this);
582            } finally {
583                closeStatement(ps);
584            }
585        } catch (SQLException e) {
586            throw new NuxeoException("Could not insert: " + sql, e);
587        }
588    }
589
590    /**
591     * Updates multiple simple rows, all for the same table.
592     */
593    protected void updateSimpleRows(String tableName, List<RowUpdate> rows) {
594        if (rows.isEmpty()) {
595            return;
596        }
597
598        // reorganize by unique sets of keys + which ones are for delta updates
599        Map<String, List<RowUpdate>> updatesByCanonKeys = new HashMap<>();
600        Map<String, Collection<String>> keysByCanonKeys = new HashMap<>();
601        Map<String, Set<String>> deltasByCanonKeys = new HashMap<>();
602        for (RowUpdate rowu : rows) {
603            List<String> keys = new ArrayList<String>(rowu.keys);
604            if (keys.isEmpty()) {
605                continue;
606            }
607            Set<String> deltas = new HashSet<>();
608            for (ListIterator<String> it = keys.listIterator(); it.hasNext();) {
609                String key = it.next();
610                Serializable value = rowu.row.get(key);
611                if (value instanceof Delta) {
612                    deltas.add(key);
613                    it.set(key + '+');
614                }
615            }
616            Collections.sort(keys);
617            String ck = StringUtils.join(keys, ','); // canonical keys
618            List<RowUpdate> keysUpdates = updatesByCanonKeys.get(ck);
619            if (keysUpdates == null) {
620                updatesByCanonKeys.put(ck, keysUpdates = new LinkedList<RowUpdate>());
621                keysByCanonKeys.put(ck, rowu.keys);
622                deltasByCanonKeys.put(ck, deltas);
623            }
624            keysUpdates.add(rowu);
625        }
626
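        // issue one UPDATE statement (batched when possible) per distinct set of updated keys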
627        for (String ck : updatesByCanonKeys.keySet()) {
628            List<RowUpdate> keysUpdates = updatesByCanonKeys.get(ck);
629            Collection<String> keys = keysByCanonKeys.get(ck);
630            Set<String> deltas = deltasByCanonKeys.get(ck);
631            SQLInfoSelect update = sqlInfo.getUpdateById(tableName, keys, deltas);
632            String loggedSql = supportsBatchUpdates && rows.size() > 1 ? update.sql + " -- BATCHED" : update.sql;
633            try {
634                PreparedStatement ps = connection.prepareStatement(update.sql);
635                int batch = 0;
636                try {
637                    for (RowUpdate rowu : keysUpdates) {
638                        batch++;
639                        if (logger.isLogEnabled()) {
640                            logger.logSQL(loggedSql, update.whatColumns, rowu.row, deltas);
641                        }
642                        int i = 1;
643                        for (Column column : update.whatColumns) {
644                            Serializable value = rowu.row.get(column.getKey());
645                            if (value instanceof Delta) {
646                                value = ((Delta) value).getDeltaValue();
647                            }
648                            column.setToPreparedStatement(ps, i++, value);
649                        }
650                        if (supportsBatchUpdates) {
651                            ps.addBatch();
652                            if (batch % UPDATE_BATCH_SIZE == 0) {
653                                int[] counts = ps.executeBatch();
654                                countExecute();
655                                logger.logCounts(counts);
656                            }
657                        } else {
658                            int count = ps.executeUpdate();
659                            countExecute();
660                            logger.logCount(count);
661                        }
662                    }
663                    if (supportsBatchUpdates) {
664                        int[] counts = ps.executeBatch();
665                        countExecute();
666                        logger.logCounts(counts);
667                    }
668                } finally {
669                    closeStatement(ps);
670                }
671            } catch (SQLException e) {
672                throw new NuxeoException("Could not update: " + update.sql, e);
673            }
674        }
675    }
676
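    /**
     * Updates multiple collection rows, all for the same table, by deleting the existing rows and re-inserting the
     * new collection values.
     */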
677    protected void updateCollectionRows(String tableName, List<RowUpdate> rowus) {
678        Set<Serializable> ids = new HashSet<Serializable>(rowus.size());
679        List<Row> rows = new ArrayList<Row>(rowus.size());
680        for (RowUpdate rowu : rowus) {
681            ids.add(rowu.row.id);
682            rows.add(rowu.row);
683        }
684        deleteRows(tableName, ids);
685        insertCollectionRows(tableName, rows);
686    }
687
688    /**
689     * Deletes multiple rows, all for the same table.
690     */
691    protected void deleteRows(String tableName, Set<Serializable> ids) {
692        if (ids.isEmpty()) {
693            return;
694        }
695        int size = ids.size();
696        int chunkSize = sqlInfo.getMaximumArgsForIn();
697        if (size > chunkSize) {
698            List<Serializable> idList = new ArrayList<Serializable>(ids);
699            for (int start = 0; start < size; start += chunkSize) {
700                int end = start + chunkSize;
701                if (end > size) {
702                    end = size;
703                }
704                // needs to be Serializable -> copy
705                List<Serializable> chunkIds = new ArrayList<Serializable>(idList.subList(start, end));
706                deleteRowsDirect(tableName, chunkIds);
707            }
708        } else {
709            deleteRowsDirect(tableName, ids);
710        }
711    }
712
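    /**
     * Soft-deletes the given nodes by flagging them with the current time instead of removing their rows; the ids are
     * processed in chunks to keep the id array size bounded.
     */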
713    protected void deleteRowsSoft(List<NodeInfo> nodeInfos) {
714        try {
715            int size = nodeInfos.size();
716            List<Serializable> ids = new ArrayList<Serializable>(size);
717            for (NodeInfo info : nodeInfos) {
718                ids.add(info.id);
719            }
720            int chunkSize = 100; // max size of ids array
721            if (size <= chunkSize) {
722                doSoftDeleteRows(ids);
723            } else {
724                for (int start = 0; start < size;) {
725                    int end = start + chunkSize;
726                    if (end > size) {
727                        end = size;
728                    }
729                    doSoftDeleteRows(ids.subList(start, end));
730                    start = end;
731                }
732            }
733        } catch (SQLException e) {
734            throw new NuxeoException("Could not soft delete", e);
735        }
736    }
737
738    // not chunked
739    protected void doSoftDeleteRows(List<Serializable> ids) throws SQLException {
740        Serializable whereIds = newIdArray(ids);
741        Calendar now = Calendar.getInstance();
742        String sql = sqlInfo.getSoftDeleteSql();
743        if (logger.isLogEnabled()) {
744            logger.logSQL(sql, Arrays.asList(whereIds, now));
745        }
746        PreparedStatement ps = connection.prepareStatement(sql);
747        try {
748            setToPreparedStatementIdArray(ps, 1, whereIds);
749            dialect.setToPreparedStatementTimestamp(ps, 2, now, null);
750            ps.execute();
751            countExecute();
752            return;
753        } finally {
754            try {
755                closeStatement(ps);
756            } catch (SQLException e) {
757                logger.error(e.getMessage(), e);
758            }
759        }
760    }
761
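    /**
     * Builds the id array used by the soft delete statement: a native array if the dialect supports arrays, otherwise
     * a '|'-separated string.
     */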
762    protected Serializable newIdArray(Collection<Serializable> ids) {
763        if (dialect.supportsArrays()) {
764            return ids.toArray(); // Object[]
765        } else {
766            // join with '|'
767            StringBuilder b = new StringBuilder();
768            for (Serializable id : ids) {
769                b.append(id);
770                b.append('|');
771            }
772            b.setLength(b.length() - 1);
773            return b.toString();
774        }
775    }
776
777    protected void setToPreparedStatementIdArray(PreparedStatement ps, int index, Serializable idArray)
778            throws SQLException {
779        if (idArray instanceof String) {
780            ps.setString(index, (String) idArray);
781        } else {
782            Array array = dialect.createArrayOf(Types.OTHER, (Object[]) idArray, connection);
783            ps.setArray(index, array);
784        }
785    }
786
787    /**
788     * Cleans up soft-deleted rows.
789     * <p>
790     * Rows deleted more recently than the beforeTime are left alone. Only a limited number of rows may be deleted, to
791     * prevent the transaction from lasting too long.
792     *
793     * @param max the maximum number of rows to delete at a time
794     * @param beforeTime the maximum deletion time of the rows to delete
795     * @return the number of rows deleted
796     */
797    public int cleanupDeletedRows(int max, Calendar beforeTime) {
798        if (max < 0) {
799            max = 0;
800        }
801        String sql = sqlInfo.getSoftDeleteCleanupSql();
802        if (logger.isLogEnabled()) {
803            logger.logSQL(sql, Arrays.<Serializable> asList(beforeTime, Long.valueOf(max)));
804        }
805        try {
806            if (sql.startsWith("{")) {
807                // callable statement
808                boolean outFirst = sql.startsWith("{?=");
809                int outIndex = outFirst ? 1 : 3;
810                int inIndex = outFirst ? 2 : 1;
811                CallableStatement cs = connection.prepareCall(sql);
812                try {
813                    cs.setInt(inIndex, max);
814                    dialect.setToPreparedStatementTimestamp(cs, inIndex + 1, beforeTime, null);
815                    cs.registerOutParameter(outIndex, Types.INTEGER);
816                    cs.execute();
817                    int count = cs.getInt(outIndex);
818                    logger.logCount(count);
819                    return count;
820                } finally {
821                    cs.close();
822                }
823            } else {
824                // standard prepared statement with result set
825                PreparedStatement ps = connection.prepareStatement(sql);
826                try {
827                    ps.setInt(1, max);
828                    dialect.setToPreparedStatementTimestamp(ps, 2, beforeTime, null);
829                    ResultSet rs = ps.executeQuery();
830                    countExecute();
831                    if (!rs.next()) {
832                        throw new NuxeoException("Cannot get result");
833                    }
834                    int count = rs.getInt(1);
835                    logger.logCount(count);
836                    return count;
837                } finally {
838                    closeStatement(ps);
839                }
840            }
841        } catch (SQLException e) {
842            throw new NuxeoException("Could not purge soft delete", e);
843        }
844    }
845
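    /**
     * Deletes rows directly with a single DELETE statement; callers are expected to have already chunked the ids to
     * fit the dialect's maximum number of IN arguments.
     */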
846    protected void deleteRowsDirect(String tableName, Collection<Serializable> ids) {
847        try {
848            String sql = sqlInfo.getDeleteSql(tableName, ids.size());
849            if (logger.isLogEnabled()) {
850                logger.logSQL(sql, ids);
851            }
852            PreparedStatement ps = connection.prepareStatement(sql);
853            try {
854                int i = 1;
855                for (Serializable id : ids) {
856                    dialect.setId(ps, i++, id);
857                }
858                int count = ps.executeUpdate();
859                countExecute();
860                logger.logCount(count);
861            } finally {
862                closeStatement(ps);
863            }
864        } catch (SQLException e) {
865            checkConcurrentUpdate(e);
866            throw new NuxeoException("Could not delete: " + tableName, e);
867        }
868    }
869
870    @Override
871    public Row readSimpleRow(RowId rowId) {
872        SQLInfoSelect select = sqlInfo.selectFragmentById.get(rowId.tableName);
873        Map<String, Serializable> criteriaMap = Collections.singletonMap(model.MAIN_KEY, rowId.id);
874        List<Row> maps = getSelectRows(rowId.tableName, select, criteriaMap, null, true);
875        return maps.isEmpty() ? null : maps.get(0);
876    }
877
878    @Override
879    public Map<String, String> getBinaryFulltext(RowId rowId) {
880        ArrayList<String> columns = new ArrayList<String>();
881        for (String index : model.getFulltextConfiguration().indexesAllBinary) {
882            String col = Model.FULLTEXT_BINARYTEXT_KEY + model.getFulltextIndexSuffix(index);
883            columns.add(col);
884        }
885        Serializable id = rowId.id;
886        Map<String, String> ret = new HashMap<String, String>(columns.size());
887        String sql = dialect.getBinaryFulltextSql(columns);
888        if (sql == null) {
889            logger.info("getBinaryFulltextSql not supported for dialect " + dialect);
890            return ret;
891        }
892        if (logger.isLogEnabled()) {
893            logger.logSQL(sql, Collections.singletonList(id));
894        }
895        PreparedStatement ps = null;
896        ResultSet rs = null;
897        try {
898            ps = connection.prepareStatement(sql);
899            try {
900                dialect.setId(ps, 1, id);
901                rs = ps.executeQuery();
902                while (rs.next()) {
903                    for (int i = 1; i <= columns.size(); i++) {
904                        ret.put(columns.get(i - 1), rs.getString(i));
905                    }
906                }
907                if (logger.isLogEnabled()) {
908                    logger.log("  -> " + ret);
909                }
910            } finally {
911                closeStatement(ps, rs);
912            }
913        } catch (SQLException e) {
914            throw new NuxeoException("Could not select: " + sql, e);
915        }
916        return ret;
917    }
918
919    @Override
920    public Serializable[] readCollectionRowArray(RowId rowId) {
921        String tableName = rowId.tableName;
922        Serializable id = rowId.id;
923        String sql = sqlInfo.selectFragmentById.get(tableName).sql;
924        try {
925            // XXX statement should be already prepared
926            if (logger.isLogEnabled()) {
927                logger.logSQL(sql, Collections.singletonList(id));
928            }
929            PreparedStatement ps = connection.prepareStatement(sql);
930            ResultSet rs = null;
931            try {
932                List<Column> columns = sqlInfo.selectFragmentById.get(tableName).whatColumns;
933                dialect.setId(ps, 1, id); // assumes only one primary column
934                rs = ps.executeQuery();
935                countExecute();
936
937                // construct the resulting collection using each row
938                CollectionIO io = getCollectionIO(tableName);
939                List<Serializable> list = new ArrayList<Serializable>();
940                Serializable[] returnId = new Serializable[1];
941                int[] returnPos = { -1 };
942                while (rs.next()) {
943                    list.add(io.getCurrentFromResultSet(rs, columns, model, returnId, returnPos));
944                }
945                PropertyType type = model.getCollectionFragmentType(tableName).getArrayBaseType();
946                Serializable[] array = type.collectionToArray(list);
947
948                if (logger.isLogEnabled()) {
949                    logger.log("  -> " + Arrays.asList(array));
950                }
951                return array;
952            } finally {
953                closeStatement(ps, rs);
954            }
955        } catch (SQLException e) {
956            throw new NuxeoException("Could not select: " + sql, e);
957        }
958    }
959
960    @Override
961    public List<Row> readSelectionRows(SelectionType selType, Serializable selId, Serializable filter,
962            Serializable criterion, boolean limitToOne) {
963        SQLInfoSelection selInfo = sqlInfo.getSelection(selType);
964        Map<String, Serializable> criteriaMap = new HashMap<String, Serializable>();
965        criteriaMap.put(selType.selKey, selId);
966        SQLInfoSelect select;
967        if (filter == null) {
968            select = selInfo.selectAll;
969        } else {
970            select = selInfo.selectFiltered;
971            criteriaMap.put(selType.filterKey, filter);
972        }
973        if (selType.criterionKey != null) {
974            criteriaMap.put(selType.criterionKey, criterion);
975        }
976        return getSelectRows(selType.tableName, select, criteriaMap, null, limitToOne);
977    }
978
979    @Override
980    public CopyResult copy(IdWithTypes source, Serializable destParentId, String destName, Row overwriteRow) {
981        // assert !model.separateMainTable; // other case not implemented
982        Invalidations invalidations = new Invalidations();
983        try {
984            Map<Serializable, Serializable> idMap = new LinkedHashMap<Serializable, Serializable>();
985            Map<Serializable, IdWithTypes> idToTypes = new HashMap<Serializable, IdWithTypes>();
986            // copy the hierarchy fragments recursively
987            Serializable overwriteId = overwriteRow == null ? null : overwriteRow.id;
988            if (overwriteId != null) {
989                // overwrite hier root with explicit values
990                String tableName = model.HIER_TABLE_NAME;
991                updateSimpleRowWithValues(tableName, overwriteRow);
992                idMap.put(source.id, overwriteId);
993                // invalidate
994                invalidations.addModified(new RowId(tableName, overwriteId));
995            }
996            // create the new hierarchy by copy
997            boolean resetVersion = destParentId != null;
998            Serializable newRootId = copyHierRecursive(source, destParentId, destName, overwriteId, resetVersion,
999                    idMap, idToTypes);
1000            // invalidate children
1001            Serializable invalParentId = overwriteId == null ? destParentId : overwriteId;
1002            if (invalParentId != null) { // null for a new version
1003                invalidations.addModified(new RowId(Invalidations.PARENT, invalParentId));
1004            }
1005            // copy all collected fragments
1006            Set<Serializable> proxyIds = new HashSet<Serializable>();
1007            for (Entry<String, Set<Serializable>> entry : model.getPerFragmentIds(idToTypes).entrySet()) {
1008                String tableName = entry.getKey();
1009                if (tableName.equals(model.HIER_TABLE_NAME)) {
1010                    // already done
1011                    continue;
1012                }
1013                if (tableName.equals(model.VERSION_TABLE_NAME)) {
1014                    // versions not fileable
1015                    // restore must not copy versions either
1016                    continue;
1017                }
1018                Set<Serializable> ids = entry.getValue();
1019                if (tableName.equals(model.PROXY_TABLE_NAME)) {
1020                    for (Serializable id : ids) {
1021                        proxyIds.add(idMap.get(id)); // copied ids
1022                    }
1023                }
1024                Boolean invalidation = copyRows(tableName, ids, idMap, overwriteId);
1025                if (invalidation != null) {
1026                    // overwrote something
1027                    // make sure things are properly invalidated in this and
1028                    // other sessions
1029                    if (Boolean.TRUE.equals(invalidation)) {
1030                        invalidations.addModified(new RowId(tableName, overwriteId));
1031                    } else {
1032                        invalidations.addDeleted(new RowId(tableName, overwriteId));
1033                    }
1034                }
1035            }
1036            return new CopyResult(newRootId, invalidations, proxyIds);
1037        } catch (SQLException e) {
1038            throw new NuxeoException("Could not copy: " + source.id.toString(), e);
1039        }
1040    }
1041
1042    /**
1043     * Updates a row in the database with given explicit values.
1044     */
1045    protected void updateSimpleRowWithValues(String tableName, Row row) {
1046        Update update = sqlInfo.getUpdateByIdForKeys(tableName, row.getKeys());
1047        Table table = update.getTable();
1048        String sql = update.getStatement();
1049        try {
1050            PreparedStatement ps = connection.prepareStatement(sql);
1051            try {
1052                if (logger.isLogEnabled()) {
1053                    List<Serializable> values = new LinkedList<Serializable>();
1054                    values.addAll(row.getValues());
1055                    values.add(row.id); // id last in SQL
1056                    logger.logSQL(sql, values);
1057                }
1058                int i = 1;
1059                List<String> keys = row.getKeys();
1060                List<Serializable> values = row.getValues();
1061                int size = keys.size();
1062                for (int r = 0; r < size; r++) {
1063                    String key = keys.get(r);
1064                    Serializable value = values.get(r);
1065                    table.getColumn(key).setToPreparedStatement(ps, i++, value);
1066                }
1067                dialect.setId(ps, i, row.id); // id last in SQL
1068                int count = ps.executeUpdate();
1069                countExecute();
1070                logger.logCount(count);
1071            } finally {
1072                closeStatement(ps);
1073            }
1074        } catch (SQLException e) {
1075            throw new NuxeoException("Could not update: " + sql, e);
1076        }
1077    }
1078
1079    /**
1080     * Copies hierarchy from id to parentId, and recurses.
1081     * <p>
1082     * If name is {@code null}, then the original name is kept.
1083     * <p>
1084     * {@code idMap} is filled with info about the correspondence between original and copied ids. {@code idToTypes} is
1085     * filled with the type of each (source) fragment.
1086     * <p>
1087     * TODO: this should be optimized to use a stored procedure.
1088     *
1089     * @param overwriteId when not {@code null}, the copy is done onto this existing node (whose own hierarchy row is not copied)
1090     * @return the new root id
1091     */
1092    protected Serializable copyHierRecursive(IdWithTypes source, Serializable parentId, String name,
1093            Serializable overwriteId, boolean resetVersion, Map<Serializable, Serializable> idMap,
1094            Map<Serializable, IdWithTypes> idToTypes) throws SQLException {
1095        idToTypes.put(source.id, source);
1096        Serializable newId;
1097        if (overwriteId == null) {
1098            newId = copyHier(source.id, parentId, name, resetVersion, idMap);
1099        } else {
1100            newId = overwriteId;
1101            idMap.put(source.id, newId);
1102        }
1103        // recurse in children
1104        boolean onlyComplex = parentId == null;
1105        for (IdWithTypes child : getChildrenIdsWithTypes(source.id, onlyComplex)) {
1106            copyHierRecursive(child, newId, null, null, resetVersion, idMap, idToTypes);
1107        }
1108        return newId;
1109    }
1110
1111    /**
1112     * Copies hierarchy from id to a new child of parentId.
1113     * <p>
1114     * If name is {@code null}, then the original name is kept.
1115     * <p>
1116     * {@code idMap} is filled with info about the correspondence between original and copied ids.
1118     *
1119     * @return the new id
1120     */
1121    protected Serializable copyHier(Serializable id, Serializable parentId, String name, boolean resetVersion,
1122            Map<Serializable, Serializable> idMap) throws SQLException {
1123        boolean explicitName = name != null;
1124
1125        SQLInfoSelect copy = sqlInfo.getCopyHier(explicitName, resetVersion);
1126        PreparedStatement ps = connection.prepareStatement(copy.sql);
1127        try {
1128            Serializable newId = generateNewId();
1129
1130            List<Serializable> debugValues = null;
1131            if (logger.isLogEnabled()) {
1132                debugValues = new ArrayList<Serializable>(4);
1133            }
1134            int i = 1;
1135            for (Column column : copy.whatColumns) {
1136                String key = column.getKey();
1137                Serializable v;
1138                if (key.equals(model.HIER_PARENT_KEY)) {
1139                    v = parentId;
1140                } else if (key.equals(model.HIER_CHILD_NAME_KEY)) {
1141                    // present if name explicitly set (first iteration)
1142                    v = name;
1143                } else if (key.equals(model.MAIN_KEY)) {
1144                    // present if APP_UUID generation
1145                    v = newId;
1146                } else if (key.equals(model.MAIN_BASE_VERSION_KEY) || key.equals(model.MAIN_CHECKED_IN_KEY)) {
1147                    v = null;
1148                } else if (key.equals(model.MAIN_MINOR_VERSION_KEY) || key.equals(model.MAIN_MAJOR_VERSION_KEY)) {
1149                    // present if reset version (regular copy, not checkin)
1150                    v = null;
1151                } else {
1152                    throw new RuntimeException(column.toString());
1153                }
1154                column.setToPreparedStatement(ps, i++, v);
1155                if (debugValues != null) {
1156                    debugValues.add(v);
1157                }
1158            }
1159            // last parameter is for 'WHERE "id" = ?'
1160            Column whereColumn = copy.whereColumns.get(0);
1161            whereColumn.setToPreparedStatement(ps, i, id);
1162            if (debugValues != null) {
1163                debugValues.add(id);
1164                logger.logSQL(copy.sql, debugValues);
1165            }
1166            int count = ps.executeUpdate();
1167            countExecute();
1168            logger.logCount(count);
1169
1170            // TODO DB_IDENTITY
1171            // post insert fetch idrow
1172
1173            idMap.put(id, newId);
1174            return newId;
1175        } finally {
1176            try {
1177                closeStatement(ps);
1178            } catch (SQLException e) {
1179                logger.error(e.getMessage(), e);
1180            }
1181        }
1182    }
1183
1184    /**
1185     * Gets the children ids and types of a node.
1186     */
1187    protected List<IdWithTypes> getChildrenIdsWithTypes(Serializable id, boolean onlyComplex) throws SQLException {
1188        List<IdWithTypes> children = new LinkedList<IdWithTypes>();
1189        String sql = sqlInfo.getSelectChildrenIdsAndTypesSql(onlyComplex);
1190        if (logger.isLogEnabled()) {
1191            logger.logSQL(sql, Collections.singletonList(id));
1192        }
1193        List<Column> columns = sqlInfo.getSelectChildrenIdsAndTypesWhatColumns();
1194        PreparedStatement ps = connection.prepareStatement(sql);
1195        ResultSet rs = null;
1196        try {
1197            List<String> debugValues = null;
1198            if (logger.isLogEnabled()) {
1199                debugValues = new LinkedList<String>();
1200            }
1201            dialect.setId(ps, 1, id); // parent id
1202            rs = ps.executeQuery();
1203            countExecute();
1204            while (rs.next()) {
1205                Serializable childId = null;
1206                String childPrimaryType = null;
1207                String[] childMixinTypes = null;
1208                int i = 1;
1209                for (Column column : columns) {
1210                    String key = column.getKey();
1211                    Serializable value = column.getFromResultSet(rs, i++);
1212                    if (key.equals(model.MAIN_KEY)) {
1213                        childId = value;
1214                    } else if (key.equals(model.MAIN_PRIMARY_TYPE_KEY)) {
1215                        childPrimaryType = (String) value;
1216                    } else if (key.equals(model.MAIN_MIXIN_TYPES_KEY)) {
1217                        childMixinTypes = (String[]) value;
1218                    }
1219                }
1220                children.add(new IdWithTypes(childId, childPrimaryType, childMixinTypes));
1221                if (debugValues != null) {
1222                    debugValues.add(childId + "/" + childPrimaryType + "/" + Arrays.toString(childMixinTypes));
1223                }
1224            }
1225            if (debugValues != null) {
1226                logger.log("  -> " + debugValues);
1227            }
1228            return children;
1229        } finally {
1230            try {
1231                closeStatement(ps);
1232            } catch (SQLException e) {
1233                logger.error(e.getMessage(), e);
1234            }
1235        }
1236    }
1237
1238    /**
1239     * Copies the rows from tableName with given ids into new ones with new ids given by idMap.
1240     * <p>
1241     * Any existing row with id {@code overwriteId} is first deleted before the copy.
1242     *
1243     * @return {@link Boolean#TRUE} for a modification or creation, {@link Boolean#FALSE} for a deletion, {@code null}
1244     *         otherwise (still absent)
1245     * @throws SQLException
1246     */
1247    protected Boolean copyRows(String tableName, Set<Serializable> ids, Map<Serializable, Serializable> idMap,
1248            Serializable overwriteId) throws SQLException {
1249        String copySql = sqlInfo.getCopySql(tableName);
1250        Column copyIdColumn = sqlInfo.getCopyIdColumn(tableName);
1251        PreparedStatement copyPs = connection.prepareStatement(copySql);
1252        String deleteSql = sqlInfo.getDeleteSql(tableName);
1253        PreparedStatement deletePs = connection.prepareStatement(deleteSql);
1254        try {
1255            boolean before = false;
1256            boolean after = false;
1257            for (Serializable id : ids) {
1258                Serializable newId = idMap.get(id);
1259                boolean overwrite = newId.equals(overwriteId);
1260                if (overwrite) {
1261                    // remove existing first
1262                    if (logger.isLogEnabled()) {
1263                        logger.logSQL(deleteSql, Collections.singletonList(newId));
1264                    }
1265                    dialect.setId(deletePs, 1, newId);
1266                    int delCount = deletePs.executeUpdate();
1267                    countExecute();
1268                    logger.logCount(delCount);
1269                    before = delCount > 0;
1270                }
1271                copyIdColumn.setToPreparedStatement(copyPs, 1, newId);
1272                copyIdColumn.setToPreparedStatement(copyPs, 2, id);
1273                if (logger.isLogEnabled()) {
1274                    logger.logSQL(copySql, Arrays.asList(newId, id));
1275                }
1276                int copyCount = copyPs.executeUpdate();
1277                countExecute();
1278                logger.logCount(copyCount);
1279                if (overwrite) {
1280                    after = copyCount > 0;
1281                }
1282            }
1283            // * , n -> mod (TRUE)
1284            // n , 0 -> del (FALSE)
1285            // 0 , 0 -> null
1286            return after ? Boolean.TRUE : (before ? Boolean.FALSE : null);
1287        } finally {
1288            try {
1289                closeStatement(copyPs);
1290                closeStatement(deletePs);
1291            } catch (SQLException e) {
1292                logger.error(e.getMessage(), e);
1293            }
1294        }
1295    }
1296
1297    @Override
1298    public List<NodeInfo> remove(NodeInfo rootInfo) {
1299        Serializable rootId = rootInfo.id;
1300        List<NodeInfo> info = getDescendantsInfo(rootId);
1301        info.add(rootInfo);
1302        if (sqlInfo.softDeleteEnabled) {
1303            deleteRowsSoft(info);
1304        } else {
1305            deleteRowsDirect(model.HIER_TABLE_NAME, Collections.singleton(rootId));
1306        }
1307        return info;
1308    }
1309
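    /**
     * Gets the info of all the descendants of a node, using a single recursive query when the dialect supports fast
     * descendants retrieval, otherwise iterating level by level.
     */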
1310    protected List<NodeInfo> getDescendantsInfo(Serializable rootId) {
1311        if (!dialect.supportsFastDescendants()) {
1312            return getDescendantsInfoIterative(rootId);
1313        }
1314        List<NodeInfo> descendants = new LinkedList<NodeInfo>();
1315        String sql = sqlInfo.getSelectDescendantsInfoSql();
1316        if (logger.isLogEnabled()) {
1317            logger.logSQL(sql, Collections.singletonList(rootId));
1318        }
1319        List<Column> columns = sqlInfo.getSelectDescendantsInfoWhatColumns();
1320        PreparedStatement ps = null;
1321        try {
1322            ps = connection.prepareStatement(sql);
1323            List<String> debugValues = null;
1324            if (logger.isLogEnabled()) {
1325                debugValues = new LinkedList<String>();
1326            }
1327            dialect.setId(ps, 1, rootId); // parent id
1328            ResultSet rs = ps.executeQuery();
1329            countExecute();
1330            while (rs.next()) {
1331                Serializable id = null;
1332                Serializable parentId = null;
1333                String primaryType = null;
1334                Boolean isProperty = null;
1335                Serializable targetId = null;
1336                Serializable versionableId = null;
1337                int i = 1;
1338                for (Column column : columns) {
1339                    String key = column.getKey();
1340                    Serializable value = column.getFromResultSet(rs, i++);
1341                    if (key.equals(model.MAIN_KEY)) {
1342                        id = value;
1343                    } else if (key.equals(model.HIER_PARENT_KEY)) {
1344                        parentId = value;
1345                    } else if (key.equals(model.MAIN_PRIMARY_TYPE_KEY)) {
1346                        primaryType = (String) value;
1347                    } else if (key.equals(model.HIER_CHILD_ISPROPERTY_KEY)) {
1348                        isProperty = (Boolean) value;
1349                    } else if (key.equals(model.PROXY_TARGET_KEY)) {
1350                        targetId = value;
1351                    } else if (key.equals(model.PROXY_VERSIONABLE_KEY)) {
1352                        versionableId = value;
1353                    }
1354                    // no mixins (not useful to caller)
1355                    // no versions (not fileable)
1356                }
1357                descendants.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
1358                if (debugValues != null) {
1359                    if (debugValues.size() < DEBUG_MAX_TREE) {
1360                        debugValues.add(id + "/" + primaryType);
1361                    }
1362                }
1363            }
1364            if (debugValues != null) {
1365                if (debugValues.size() >= DEBUG_MAX_TREE) {
1366                    debugValues.add("... (" + descendants.size() + ") results");
1367                }
1368                logger.log("  -> " + debugValues);
1369            }
1370            return descendants;
1371        } catch (SQLException e) {
1372            throw new NuxeoException("Failed to get descendants", e);
1373        } finally {
1374            try {
1375                closeStatement(ps);
1376            } catch (SQLException e) {
1377                logger.error(e.getMessage(), e);
1378            }
1379        }
1380    }
1381
    /**
     * Gets the {@link NodeInfo} for all the descendants of the given node by fetching children level by level
     * (breadth-first); the {@code done} set protects against cycles in the hierarchy.
     */
    protected List<NodeInfo> getDescendantsInfoIterative(Serializable rootId) {
        Set<Serializable> done = new HashSet<>();
        List<Serializable> todo = new ArrayList<>(Collections.singleton(rootId));
1385        List<NodeInfo> descendants = new ArrayList<NodeInfo>();
1386        while (!todo.isEmpty()) {
1387            List<NodeInfo> infos = getChildrenNodeInfos(todo);
1388            todo = new ArrayList<>();
1389            for (NodeInfo info : infos) {
1390                Serializable id = info.id;
1391                if (!done.add(id)) {
1392                    continue;
1393                }
1394                todo.add(id);
1395                descendants.add(info);
1396            }
1397        }
1398        return descendants;
1399    }
1400
1401    /**
     * Gets the children of the given nodes as a list of {@link NodeInfo}.
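     * <p>
     * The given ids are the parent ids whose children are fetched (one level of the tree at a time).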
1403     */
1404    protected List<NodeInfo> getChildrenNodeInfos(Collection<Serializable> ids) {
1405        List<NodeInfo> children = new LinkedList<NodeInfo>();
1406        SQLInfoSelect select = sqlInfo.getSelectChildrenNodeInfos(ids.size());
1407        if (logger.isLogEnabled()) {
1408            logger.logSQL(select.sql, ids);
1409        }
1410        Column where = select.whereColumns.get(0);
1411        PreparedStatement ps = null;
1412        ResultSet rs = null;
1413        try {
1414            ps = connection.prepareStatement(select.sql);
1415            List<String> debugValues = null;
1416            if (logger.isLogEnabled()) {
1417                debugValues = new LinkedList<String>();
1418            }
1419            int ii = 1;
1420            for (Serializable id : ids) {
1421                where.setToPreparedStatement(ps, ii++, id);
1422            }
1423            rs = ps.executeQuery();
1424            countExecute();
1425            while (rs.next()) {
1426                Serializable id = null;
1427                Serializable parentId = null;
1428                String primaryType = null;
1429                Boolean isProperty = Boolean.FALSE;
1430                Serializable targetId = null;
1431                Serializable versionableId = null;
1432                int i = 1;
1433                for (Column column : select.whatColumns) {
1434                    String key = column.getKey();
1435                    Serializable value = column.getFromResultSet(rs, i++);
1436                    if (key.equals(model.MAIN_KEY)) {
1437                        id = value;
1438                    } else if (key.equals(model.HIER_PARENT_KEY)) {
1439                        parentId = value;
1440                    } else if (key.equals(model.MAIN_PRIMARY_TYPE_KEY)) {
1441                        primaryType = (String) value;
1442                    } else if (key.equals(model.PROXY_TARGET_KEY)) {
1443                        targetId = value;
1444                    } else if (key.equals(model.PROXY_VERSIONABLE_KEY)) {
1445                        versionableId = value;
1446                    }
1447                }
1448                children.add(new NodeInfo(id, parentId, primaryType, isProperty, versionableId, targetId));
1449                if (debugValues != null) {
1450                    if (debugValues.size() < DEBUG_MAX_TREE) {
1451                        debugValues.add(id + "/" + primaryType);
1452                    }
1453                }
1454            }
1455            if (debugValues != null) {
1456                if (debugValues.size() >= DEBUG_MAX_TREE) {
1457                    debugValues.add("... (" + children.size() + ") results");
1458                }
1459                logger.log("  -> " + debugValues);
1460            }
1461            return children;
1462        } catch (SQLException e) {
1463            throw new NuxeoException("Failed to get descendants", e);
1464        } finally {
1465            try {
1466                closeStatement(ps, rs);
1467            } catch (SQLException e) {
1468                logger.error(e.getMessage(), e);
1469            }
1470        }
1471    }
1472
1473}