001/*
002 * (C) Copyright 2014-2020 Nuxeo (http://nuxeo.com/) and others.
003 *
004 * Licensed under the Apache License, Version 2.0 (the "License");
005 * you may not use this file except in compliance with the License.
006 * You may obtain a copy of the License at
007 *
008 *     http://www.apache.org/licenses/LICENSE-2.0
009 *
010 * Unless required by applicable law or agreed to in writing, software
011 * distributed under the License is distributed on an "AS IS" BASIS,
012 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
013 * See the License for the specific language governing permissions and
014 * limitations under the License.
015 *
016 * Contributors:
017 *     Florent Guillaume
018 */
019package org.nuxeo.ecm.core.storage.dbs;
020
021import static java.lang.Boolean.FALSE;
022import static java.lang.Boolean.TRUE;
023import static org.nuxeo.ecm.core.api.security.SecurityConstants.BROWSE;
024import static org.nuxeo.ecm.core.api.security.SecurityConstants.EVERYONE;
025import static org.nuxeo.ecm.core.api.security.SecurityConstants.READ;
026import static org.nuxeo.ecm.core.api.security.SecurityConstants.READ_VERSION;
027import static org.nuxeo.ecm.core.api.security.SecurityConstants.UNSUPPORTED_ACL;
028import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.INITIAL_CHANGE_TOKEN;
029import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.INITIAL_SYS_CHANGE_TOKEN;
030import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_ACE_GRANT;
031import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_ACE_PERMISSION;
032import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_ACE_STATUS;
033import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_ACE_USER;
034import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_ACL;
035import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_ACP;
036import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_ANCESTOR_IDS;
037import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_CHANGE_TOKEN;
038import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_FULLTEXT_JOBID;
039import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_ID;
040import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_IS_PROXY;
041import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_IS_VERSION;
042import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_NAME;
043import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_PARENT_ID;
044import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_POS;
045import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_PRIMARY_TYPE;
046import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_PROXY_IDS;
047import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_PROXY_TARGET_ID;
048import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_PROXY_VERSION_SERIES_ID;
049import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_READ_ACL;
050import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_SYS_CHANGE_TOKEN;
051import static org.nuxeo.ecm.core.storage.dbs.DBSDocument.KEY_VERSION_SERIES_ID;
052
053import java.io.Serializable;
054import java.util.ArrayList;
055import java.util.Arrays;
056import java.util.Collection;
057import java.util.Collections;
058import java.util.HashMap;
059import java.util.HashSet;
060import java.util.LinkedHashSet;
061import java.util.LinkedList;
062import java.util.List;
063import java.util.Map;
064import java.util.Map.Entry;
065import java.util.Set;
066import java.util.stream.Stream;
067
068import org.apache.commons.logging.Log;
069import org.apache.commons.logging.LogFactory;
070import org.nuxeo.ecm.core.BatchFinderWork;
071import org.nuxeo.ecm.core.BatchProcessorWork;
072import org.nuxeo.ecm.core.api.ConcurrentUpdateException;
073import org.nuxeo.ecm.core.api.Lock;
074import org.nuxeo.ecm.core.api.NuxeoException;
075import org.nuxeo.ecm.core.api.NuxeoPrincipal;
076import org.nuxeo.ecm.core.api.PartialList;
077import org.nuxeo.ecm.core.api.ScrollResult;
078import org.nuxeo.ecm.core.api.SystemPrincipal;
079import org.nuxeo.ecm.core.api.lock.LockManager;
080import org.nuxeo.ecm.core.api.model.DeltaLong;
081import org.nuxeo.ecm.core.api.repository.FulltextConfiguration;
082import org.nuxeo.ecm.core.api.repository.RepositoryManager;
083import org.nuxeo.ecm.core.model.BaseSession;
084import org.nuxeo.ecm.core.model.BaseSession.VersionAclMode;
085import org.nuxeo.ecm.core.query.QueryFilter;
086import org.nuxeo.ecm.core.query.sql.NXQL;
087import org.nuxeo.ecm.core.query.sql.model.OrderByClause;
088import org.nuxeo.ecm.core.schema.SchemaManager;
089import org.nuxeo.ecm.core.schema.types.Schema;
090import org.nuxeo.ecm.core.security.SecurityService;
091import org.nuxeo.ecm.core.storage.BaseDocument;
092import org.nuxeo.ecm.core.storage.FulltextExtractorWork;
093import org.nuxeo.ecm.core.storage.State;
094import org.nuxeo.ecm.core.storage.State.ListDiff;
095import org.nuxeo.ecm.core.storage.State.StateDiff;
096import org.nuxeo.ecm.core.storage.StateHelper;
097import org.nuxeo.ecm.core.storage.dbs.DBSConnection.DBSQueryOperator;
098import org.nuxeo.ecm.core.work.api.Work;
099import org.nuxeo.ecm.core.work.api.WorkManager;
100import org.nuxeo.ecm.core.work.api.WorkManager.Scheduling;
101import org.nuxeo.runtime.api.Framework;
102
103/**
104 * Transactional state for a session.
105 * <p>
106 * Until {@code save()} is called, data lives in the transient map.
107 * <p>
108 * Upon save, data is written to the repository connection.
109 * <p>
110 * If the connection is transactional, usual behavior occurs.
111 * <p>
112 * If the connection is not transactional, then at this means that other sessions can read uncommitted data. To allow
113 * rollback, save data is also kept in an undo log in order for rollback to be possible. On commit, the undo log is
114 * forgotten. On rollback, the undo log is replayed.
115 *
116 * @since 5.9.4
117 */
118public class DBSTransactionState implements LockManager, AutoCloseable {
119
    private static final Log log = LogFactory.getLog(DBSTransactionState.class);

    // Marker key stored in an undo log State to mean "re-create the document completely on rollback"
    // instead of applying the State as an update. The \0 characters make collisions with real keys impossible.
    private static final String KEY_UNDOLOG_CREATE = "__UNDOLOG_CREATE__\0\0";

    /** Keys used when computing Read ACLs. */
    protected static final Set<String> READ_ACL_RECURSION_KEYS = new HashSet<>(
            Arrays.asList(KEY_READ_ACL, KEY_ACP, KEY_IS_VERSION, KEY_VERSION_SERIES_ID, KEY_PARENT_ID));

    // Framework property: when "true", large Read ACL recomputations are delegated to asynchronous works.
    public static final String READ_ACL_ASYNC_ENABLED_PROPERTY = "nuxeo.core.readacl.async.enabled";

    public static final String READ_ACL_ASYNC_ENABLED_DEFAULT = "true";

    // Framework property: number of descendants above which Read ACL recomputation goes asynchronous.
    public static final String READ_ACL_ASYNC_THRESHOLD_PROPERTY = "nuxeo.core.readacl.async.threshold";

    public static final String READ_ACL_ASYNC_THRESHOLD_DEFAULT = "500";

    protected final DBSRepository repository;

    // Per-transaction-state repository connection; closed by close()
    protected final DBSConnection connection;

    protected final DBSSession session;

    /** Retrieved and created document state. */
    protected Map<String, DBSDocumentState> transientStates = new HashMap<>();

    /** Ids of documents created but not yet saved. */
    protected Set<String> transientCreated = new LinkedHashSet<>();

    /**
     * Document ids modified as "user changes", which means that a change token should be checked.
     *
     * @since 9.2
     */
    protected final Set<Serializable> userChangeIds = new HashSet<>();

    /**
     * Undo log.
     * <p>
     * A map of document ids to null or State. The value is null when the document has to be deleted when applying the
     * undo log. Otherwise the value is a State. If the State contains the key {@link #KEY_UNDOLOG_CREATE} then the
     * state must be re-created completely when applying the undo log, otherwise just applied as an update.
     * <p>
     * Null when there is no active transaction.
     */
    protected Map<String, State> undoLog;

    // All permissions that imply Browse, as resolved by the SecurityService at construction time
    protected final Set<String> browsePermissions;

    protected final VersionAclMode versionAclMode;

    protected final boolean disableReadVersionPermission;
171
172    public DBSTransactionState(DBSRepository repository, DBSSession session) {
173        this.repository = repository;
174        this.connection = repository.getConnection();
175        this.session = session;
176        SecurityService securityService = Framework.getService(SecurityService.class);
177        browsePermissions = new HashSet<>(Arrays.asList(securityService.getPermissionsToCheck(BROWSE)));
178        versionAclMode = VersionAclMode.getConfiguration();
179        disableReadVersionPermission = BaseSession.isReadVersionPermissionDisabled();
180    }
181
    /**
     * {@inheritDoc}
     *
     * @since 11.1
     */
    @Override
    public void close() {
        // release the repository connection acquired in the constructor
        connection.close();
    }
191
    /** @since 11.1 */
    public String getRootId() {
        // the connection knows the repository's root document id
        return connection.getRootId();
    }
196
197    /**
198     * New transient state for something just read from the repository.
199     */
200    protected DBSDocumentState newTransientState(State state) {
201        if (state == null) {
202            return null;
203        }
204        String id = (String) state.get(KEY_ID);
205        if (transientStates.containsKey(id)) {
206            throw new IllegalStateException("Already transient: " + id);
207        }
208        DBSDocumentState docState = new DBSDocumentState(state); // copy
209        transientStates.put(id, docState);
210        return docState;
211    }
212
213    /**
214     * Returns a state and marks it as transient, because it's about to be modified or returned to user code (where it
215     * may be modified).
216     */
217    public DBSDocumentState getStateForUpdate(String id) {
218        // check transient state
219        DBSDocumentState docState = transientStates.get(id);
220        if (docState != null) {
221            return docState;
222        }
223        // fetch from repository
224        State state = connection.readState(id);
225        return newTransientState(state);
226    }
227
228    /**
229     * Returns a state which won't be modified.
230     */
231    // TODO in some cases it's good to have this kept in memory instead of
232    // rereading from database every time
233    // XXX getStateForReadOneShot
234    public State getStateForRead(String id) {
235        // check transient state
236        DBSDocumentState docState = transientStates.get(id);
237        if (docState != null) {
238            return docState.getState();
239        }
240        // fetch from repository
241        return connection.readState(id);
242    }
243
244    /**
245     * Returns states and marks them transient, because they're about to be returned to user code (where they may be
246     * modified).
247     */
248    public List<DBSDocumentState> getStatesForUpdate(Collection<String> ids) {
249        // check which ones we have to fetch from repository
250        List<String> idsToFetch = new LinkedList<>();
251        for (String id : ids) {
252            // check transient state
253            DBSDocumentState docState = transientStates.get(id);
254            if (docState != null) {
255                continue;
256            }
257            // will have to fetch it
258            idsToFetch.add(id);
259        }
260        if (!idsToFetch.isEmpty()) {
261            List<State> states = connection.readStates(idsToFetch);
262            for (State state : states) {
263                newTransientState(state);
264            }
265        }
266        // everything now fetched in transient
267        List<DBSDocumentState> docStates = new ArrayList<>(ids.size());
268        for (String id : ids) {
269            DBSDocumentState docState = transientStates.get(id);
270            if (docState == null) {
271                if (log.isTraceEnabled()) {
272                    log.trace("Cannot fetch document with id: " + id, new Throwable("debug stack trace"));
273                }
274                continue;
275            }
276            docStates.add(docState);
277        }
278        return docStates;
279    }
280
281    // XXX TODO for update or for read?
282    public DBSDocumentState getChildState(String parentId, String name) {
283        // check transient state
284        for (DBSDocumentState docState : transientStates.values()) {
285            if (!parentId.equals(docState.getParentId())) {
286                continue;
287            }
288            if (!name.equals(docState.getName())) {
289                continue;
290            }
291            return docState;
292        }
293        // fetch from repository
294        State state = connection.readChildState(parentId, name, Collections.emptySet());
295        if (state == null) {
296            return null;
297        }
298        String id = (String) state.get(KEY_ID);
299        if (transientStates.containsKey(id)) {
300            // found transient, even though we already checked
301            // that means that in-memory it's not a child, but in-database it's a child (was moved)
302            // -> ignore the database state
303            return null;
304        }
305        return newTransientState(state);
306    }
307
308    public boolean hasChild(String parentId, String name) {
309        // check transient state
310        for (DBSDocumentState docState : transientStates.values()) {
311            if (!parentId.equals(docState.getParentId())) {
312                continue;
313            }
314            if (!name.equals(docState.getName())) {
315                continue;
316            }
317            return true;
318        }
319        // check repository
320        return connection.hasChild(parentId, name, Collections.emptySet());
321    }
322
    /** Gets the children states of a document, without excluding special or regular children. */
    public List<DBSDocumentState> getChildrenStates(String parentId) {
        return getChildrenStates(parentId, false, false);
    }
326
327    public List<DBSDocumentState> getChildrenStates(String parentId, boolean excludeSpecialChildren,
328            boolean excludeRegularChildren) {
329        List<DBSDocumentState> docStates = new LinkedList<>();
330        Set<String> seen = new HashSet<>();
331        Set<String> specialChildrenTypes = Framework.getService(SchemaManager.class).getSpecialDocumentTypes();
332        boolean excludeChildren = excludeSpecialChildren || excludeRegularChildren;
333        // check transient state
334        for (DBSDocumentState docState : transientStates.values()) {
335            if (!parentId.equals(docState.getParentId())) {
336                continue;
337            }
338            if (excludeChildren) {
339                boolean specialChild = specialChildrenTypes.contains(docState.getPrimaryType());
340                if (excludeSpecialChildren && specialChild || excludeRegularChildren && !specialChild) {
341                    continue;
342                }
343            }
344            docStates.add(docState);
345            seen.add(docState.getId());
346        }
347        List<State> states;
348        if (!excludeChildren) {
349            states = connection.queryKeyValue(KEY_PARENT_ID, parentId, seen);
350        } else {
351            // fetch from repository
352            DBSQueryOperator operator = excludeSpecialChildren ? DBSQueryOperator.NOT_IN : DBSQueryOperator.IN;
353            states = connection.queryKeyValueWithOperator(KEY_PARENT_ID, parentId, KEY_PRIMARY_TYPE, operator,
354                    specialChildrenTypes, seen);
355        }
356        for (State state : states) {
357            String id = (String) state.get(KEY_ID);
358            if (transientStates.containsKey(id)) {
359                // found transient, even though we passed an exclusion list for known children
360                // that means that in-memory it's not a child, but in-database it's a child (was moved)
361                // -> ignore the database state
362                continue;
363            }
364            docStates.add(newTransientState(state));
365        }
366        return docStates;
367    }
368
    /** Gets the children ids of a document, without excluding special or regular children. */
    public List<String> getChildrenIds(String parentId) {
        return getChildrenIds(parentId, false, false);
    }
372
373    public List<String> getChildrenIds(String parentId, boolean excludeSpecialChildren,
374            boolean excludeRegularChildren) {
375        List<String> children = new ArrayList<>();
376        Set<String> seen = new HashSet<>();
377        Set<String> specialChildrenTypes = Framework.getService(SchemaManager.class).getSpecialDocumentTypes();
378        boolean excludeChildren = excludeSpecialChildren || excludeRegularChildren;
379        // check transient state
380        for (DBSDocumentState docState : transientStates.values()) {
381            String id = docState.getId();
382            if (!parentId.equals(docState.getParentId())) {
383                continue;
384            }
385            if (excludeChildren) {
386                boolean specialChild = specialChildrenTypes.contains(docState.getPrimaryType());
387                if (excludeSpecialChildren && specialChild || excludeRegularChildren && !specialChild) {
388                    continue;
389                }
390            }
391            seen.add(id);
392            children.add(id);
393        }
394        List<State> states;
395        if (!excludeChildren) {
396            states = connection.queryKeyValue(KEY_PARENT_ID, parentId, seen);
397        } else {
398            // fetch from repository depending on the filter flags
399            DBSQueryOperator operator = excludeSpecialChildren ? DBSQueryOperator.NOT_IN : DBSQueryOperator.IN;
400            states = connection.queryKeyValueWithOperator(KEY_PARENT_ID, parentId, KEY_PRIMARY_TYPE, operator,
401                    specialChildrenTypes, seen);
402        }
403        for (State state : states) {
404            String id = (String) state.get(KEY_ID);
405            if (transientStates.containsKey(id)) {
406                // found transient, even though we passed an exclusion list for known children
407                // that means that in-memory it's not a child, but in-database it's a child (was moved)
408                // -> ignore the database state
409                continue;
410            }
411            children.add(id);
412        }
413        return new ArrayList<>(children);
414    }
415
416    public boolean hasChildren(String parentId) {
417        // check transient state
418        for (DBSDocumentState docState : transientStates.values()) {
419            if (!parentId.equals(docState.getParentId())) {
420                continue;
421            }
422            return true;
423        }
424        // check repository
425        return connection.queryKeyValuePresence(KEY_PARENT_ID, parentId, Collections.emptySet());
426    }
427
428    public DBSDocumentState createChild(String id, String parentId, String name, Long pos, String typeName) {
429        // id may be not-null for import
430        if (id == null) {
431            id = connection.generateNewId();
432        }
433        if (transientStates.containsKey(id)) {
434            throw new ConcurrentUpdateException(id);
435        }
436        transientCreated.add(id);
437        DBSDocumentState docState = new DBSDocumentState();
438        transientStates.put(id, docState);
439        docState.put(KEY_ID, id);
440        docState.put(KEY_PARENT_ID, parentId);
441        docState.put(KEY_ANCESTOR_IDS, getAncestorIds(parentId));
442        docState.put(KEY_NAME, name);
443        docState.put(KEY_POS, pos);
444        docState.put(KEY_PRIMARY_TYPE, typeName);
445        if (repository.isChangeTokenEnabled()) {
446            docState.put(KEY_SYS_CHANGE_TOKEN, INITIAL_SYS_CHANGE_TOKEN);
447        }
448        // update read acls for new doc
449        updateDocumentReadAcls(id);
450        return docState;
451    }
452
453    /** Gets ancestors including id itself. */
454    protected Object[] getAncestorIds(String id) {
455        if (id == null) {
456            return null;
457        }
458        State state = getStateForRead(id);
459        if (state == null) {
460            throw new RuntimeException("No such id: " + id);
461        }
462        Object[] ancestors = (Object[]) state.get(KEY_ANCESTOR_IDS);
463        if (ancestors == null) {
464            return new Object[] { id };
465        } else {
466            Object[] newAncestors = new Object[ancestors.length + 1];
467            System.arraycopy(ancestors, 0, newAncestors, 0, ancestors.length);
468            newAncestors[ancestors.length] = id;
469            return newAncestors;
470        }
471    }
472
473    /**
474     * Copies the document into a newly-created object.
475     * <p>
476     * The copy is automatically saved.
477     */
478    public DBSDocumentState copy(String id) {
479        DBSDocumentState copyState = new DBSDocumentState(getStateForRead(id));
480        String copyId = connection.generateNewId();
481        copyState.put(KEY_ID, copyId);
482        copyState.put(KEY_PROXY_IDS, null); // no proxies to this new doc
483        // other fields updated by the caller
484        transientStates.put(copyId, copyState);
485        transientCreated.add(copyId);
486        return copyState;
487    }
488
489    /**
490     * Updates ancestors recursively after a move.
491     * <p>
492     * Recursing from given doc, replace the first ndel ancestors with those passed.
493     * <p>
494     * Doesn't check transient (assumes save is done). The modifications are automatically saved.
495     */
496    public void updateAncestors(String id, int ndel, Object[] ancestorIds) {
497        int nadd = ancestorIds.length;
498        Set<String> ids = new HashSet<>();
499        ids.add(id);
500        try (Stream<State> states = getDescendants(id, Collections.emptySet(), 0)) {
501            states.forEach(state -> ids.add((String) state.get(KEY_ID)));
502        }
503        // we collect all ids first to avoid reentrancy to the repository
504        for (String cid : ids) {
505            // XXX TODO oneShot update, don't pollute transient space
506            DBSDocumentState docState = getStateForUpdate(cid);
507            Object[] ancestors = (Object[]) docState.get(KEY_ANCESTOR_IDS);
508            Object[] newAncestors;
509            if (ancestors == null) {
510                newAncestors = ancestorIds.clone();
511            } else {
512                newAncestors = new Object[ancestors.length - ndel + nadd];
513                System.arraycopy(ancestorIds, 0, newAncestors, 0, nadd);
514                System.arraycopy(ancestors, ndel, newAncestors, nadd, ancestors.length - ndel);
515            }
516            docState.put(KEY_ANCESTOR_IDS, newAncestors);
517        }
518    }
519
520    protected int getReadAclsAsyncThreshold() {
521        boolean enabled = Boolean.parseBoolean(
522                Framework.getProperty(READ_ACL_ASYNC_ENABLED_PROPERTY, READ_ACL_ASYNC_ENABLED_DEFAULT));
523        if (enabled) {
524            return Integer.parseInt(
525                    Framework.getProperty(READ_ACL_ASYNC_THRESHOLD_PROPERTY, READ_ACL_ASYNC_THRESHOLD_DEFAULT));
526        } else {
527            return 0;
528        }
529    }
530
    /**
     * Updates the Read ACLs recursively on a document.
     */
    public void updateTreeReadAcls(String id) {
        // versions too XXX TODO

        save(); // flush everything to the database

        // update the doc itself
        updateDocumentReadAcls(id);

        // check if we have a small enough number of descendants that we can process them synchronously
        // limit == 0 means async recomputation is disabled (see getReadAclsAsyncThreshold)
        int limit = getReadAclsAsyncThreshold();
        Set<String> ids = new HashSet<>();
        try (Stream<State> states = getDescendants(id, Collections.emptySet(), limit)) {
            states.forEach(state -> ids.add((String) state.get(KEY_ID)));
        }
        if (limit == 0 || ids.size() < limit) {
            // update all descendants synchronously
            ids.forEach(this::updateDocumentReadAcls);
        } else {
            // update the direct children synchronously, the rest asynchronously

            // update the direct children (with a limit in case it's too big)
            String nxql = String.format("SELECT ecm:uuid FROM Document WHERE ecm:parentId = '%s'", id);
            NuxeoPrincipal principal = new SystemPrincipal(null);
            QueryFilter queryFilter = new QueryFilter(principal, null, null, null, Collections.emptyList(), limit, 0);
            // TODO do the query with this.queryAndFetch instead of going back to the session
            PartialList<Map<String, Serializable>> pl = session.queryProjection(nxql, NXQL.NXQL, queryFilter, false, 0,
                    new Object[0]);
            for (Map<String, Serializable> map : pl) {
                String childId = (String) map.get(NXQL.ECM_UUID);
                updateDocumentReadAcls(childId);
            }

            // asynchronous work to do the whole tree
            nxql = String.format("SELECT ecm:uuid FROM Document WHERE ecm:ancestorId = '%s'", id);
            Work work = new FindReadAclsWork(repository.getName(), nxql, null);
            Framework.getService(WorkManager.class).schedule(work);
        }
    }
572
    /**
     * Work to find the ids of documents for which Read ACLs must be recomputed, and launch the needed update works.
     *
     * @since 9.10
     */
    public static class FindReadAclsWork extends BatchFinderWork {

        private static final long serialVersionUID = 1L;

        public FindReadAclsWork(String repositoryName, String nxql, String originatingUsername) {
            super(repositoryName, nxql, originatingUsername);
        }

        @Override
        public String getTitle() {
            return "Find descendants for Read ACLs";
        }

        @Override
        public String getCategory() {
            return "security";
        }

        @Override
        public int getBatchSize() {
            // number of document ids handed to each UpdateReadAclsWork
            return 500;
        }

        @Override
        public Work getBatchProcessorWork(List<String> docIds) {
            // repositoryName is inherited from the Work superclass hierarchy
            return new UpdateReadAclsWork(repositoryName, docIds, getOriginatingUsername());
        }
    }
606
    /**
     * Work to update the Read ACLs on a list of documents, without recursion.
     *
     * @since 9.10
     */
    public static class UpdateReadAclsWork extends BatchProcessorWork {

        private static final long serialVersionUID = 1L;

        public UpdateReadAclsWork(String repositoryName, List<String> docIds, String originatingUsername) {
            super(repositoryName, docIds, originatingUsername);
        }

        @Override
        public String getTitle() {
            return "Update Read ACLs";
        }

        @Override
        public String getCategory() {
            return "security";
        }

        @Override
        public int getBatchSize() {
            // documents processed per batch
            return 50;
        }

        @Override
        public void processBatch(List<String> docIds) {
            // session is presumably provided by the BatchProcessorWork superclass during execution
            // — TODO confirm against BatchProcessorWork
            session.updateReadACLs(docIds);
        }
    }
640
641    /**
642     * Updates the Read ACLs on a document (not recursively), bypassing transient space and caches for the document
643     * itself (not the ancestors, needed for ACL inheritance and for which caching is useful).
644     */
645    public void updateReadACLs(Collection<String> docIds) {
646        docIds.forEach(id -> updateDocumentReadAclsNoCache(id));
647    }
648
    /**
     * Updates the Read ACLs on a document (not recursively)
     */
    protected void updateDocumentReadAcls(String id) {
        // goes through the transient space, so the change is persisted with the rest of the transaction
        DBSDocumentState docState = getStateForUpdate(id);
        docState.put(KEY_READ_ACL, getReadACL(docState.getState()));
    }
656
657    /**
658     * Updates the Read ACLs on a document, without polluting caches.
659     * <p>
660     * When fetching parents recursively to compute inheritance, the regular transient space and repository caching are
661     * used.
662     */
663    protected void updateDocumentReadAclsNoCache(String id) {
664        // no transient for state read, and we don't want to trash caches
665        // fetch from repository only the properties needed for Read ACL computation and recursion
666        State state = connection.readPartialState(id, READ_ACL_RECURSION_KEYS);
667        State oldState = new State(1);
668        oldState.put(KEY_READ_ACL, state.get(KEY_READ_ACL));
669        // compute new value
670        State newState = new State(1);
671        newState.put(KEY_READ_ACL, getReadACL(state));
672        StateDiff diff = StateHelper.diff(oldState, newState);
673        if (!diff.isEmpty()) {
674            // no transient for state write, we write directly and just invalidate caches
675            connection.updateState(id, diff, null);
676        }
677    }
678
679    /**
680     * Gets the Read ACL (flat list of users having browse permission, including inheritance) on a document.
681     */
682    protected String[] getReadACL(State state) {
683        Set<String> racls = new HashSet<>();
684        boolean replaceReadVersionPermission = false;
685        if (TRUE.equals(state.get(KEY_IS_VERSION))) {
686            replaceReadVersionPermission = !disableReadVersionPermission;
687            if (versionAclMode == VersionAclMode.DISABLED) {
688                String versionSeriesId = (String) state.get(KEY_VERSION_SERIES_ID);
689                if (versionSeriesId == null || (state = getStateForRead(versionSeriesId)) == null) {
690                    // version with no live doc
691                    return new String[0];
692                }
693            }
694        }
695        LOOP: do {
696            @SuppressWarnings("unchecked")
697            List<Serializable> aclList = (List<Serializable>) state.get(KEY_ACP);
698            if (aclList != null) {
699                for (Serializable aclSer : aclList) {
700                    State aclMap = (State) aclSer;
701                    @SuppressWarnings("unchecked")
702                    List<Serializable> aceList = (List<Serializable>) aclMap.get(KEY_ACL);
703                    for (Serializable aceSer : aceList) {
704                        State aceMap = (State) aceSer;
705                        String username = (String) aceMap.get(KEY_ACE_USER);
706                        String permission = (String) aceMap.get(KEY_ACE_PERMISSION);
707                        Boolean granted = (Boolean) aceMap.get(KEY_ACE_GRANT);
708                        Long status = (Long) aceMap.get(KEY_ACE_STATUS);
709                        if (replaceReadVersionPermission && READ_VERSION.equals(permission)) {
710                            permission = READ;
711                        }
712                        if (TRUE.equals(granted) && browsePermissions.contains(permission)
713                                && (status == null || status == 1)) {
714                            racls.add(username);
715                        }
716                        if (FALSE.equals(granted)) {
717                            if (!EVERYONE.equals(username)) {
718                                // TODO log
719                                racls.add(UNSUPPORTED_ACL);
720                            }
721                            break LOOP;
722                        }
723                    }
724                }
725            }
726            // get the parent; for a version the parent is the live document
727            String parentKey;
728            if (TRUE.equals(state.get(KEY_IS_VERSION))) {
729                replaceReadVersionPermission = !disableReadVersionPermission;
730                parentKey = KEY_VERSION_SERIES_ID;
731            } else {
732                parentKey = KEY_PARENT_ID;
733            }
734            String parentId = (String) state.get(parentKey);
735            state = parentId == null ? null : getStateForRead(parentId);
736        } while (state != null);
737
738        // sort to have canonical order
739        List<String> racl = new ArrayList<>(racls);
740        Collections.sort(racl);
741        return racl.toArray(new String[racl.size()]);
742    }
743
    /**
     * Streams the states of the descendants of a document, fetching only the given keys.
     * <p>
     * Pure delegation to the connection; transient (unsaved) state is not consulted.
     *
     * @param id the root document id
     * @param keys the state keys to fetch for each descendant
     * @param limit the maximum number of results — presumably 0 means no limit; confirm against the connection contract
     * @return a stream of descendant states
     */
    protected Stream<State> getDescendants(String id, Set<String> keys, int limit) {
        return connection.getDescendants(id, keys, limit);
    }
747
748    public List<DBSDocumentState> getKeyValuedStates(String key, Object value) {
749        List<DBSDocumentState> docStates = new LinkedList<>();
750        Set<String> seen = new HashSet<>();
751        // check transient state
752        for (DBSDocumentState docState : transientStates.values()) {
753            if (!value.equals(docState.get(key))) {
754                continue;
755            }
756            docStates.add(docState);
757            seen.add(docState.getId());
758        }
759        // fetch from repository
760        List<State> states = connection.queryKeyValue(key, value, seen);
761        for (State state : states) {
762            docStates.add(newTransientState(state));
763        }
764        return docStates;
765    }
766
767    public List<DBSDocumentState> getKeyValuedStates(String key1, Object value1, String key2, Object value2) {
768        List<DBSDocumentState> docStates = new LinkedList<>();
769        Set<String> seen = new HashSet<>();
770        // check transient state
771        for (DBSDocumentState docState : transientStates.values()) {
772            seen.add(docState.getId());
773            if (!(value1.equals(docState.get(key1)) && value2.equals(docState.get(key2)))) {
774                continue;
775            }
776            docStates.add(docState);
777        }
778        // fetch from repository
779        List<State> states = connection.queryKeyValue(key1, value1, key2, value2, seen);
780        for (State state : states) {
781            docStates.add(newTransientState(state));
782        }
783        return docStates;
784    }
785
    /**
     * Queries the repository and fetches the results as raw maps.
     * <p>
     * Pure delegation to the connection; transient (unsaved) state is not consulted.
     *
     * @since 11.1
     */
    public PartialList<Map<String, Serializable>> queryAndFetch(DBSExpressionEvaluator evaluator,
            OrderByClause orderByClause, boolean distinctDocuments, int limit, int offset, int countUpTo) {
        return connection.queryAndFetch(evaluator, orderByClause, distinctDocuments, limit, offset, countUpTo);
    }
791
    /**
     * Starts a scroll of the document ids matched by the evaluator, delegating to the connection.
     *
     * @param batchSize the number of ids per scroll batch
     * @param keepAliveSeconds how long the scroll context is kept alive between calls
     * @since 11.1
     */
    public ScrollResult<String> scroll(DBSExpressionEvaluator evaluator, int batchSize, int keepAliveSeconds) {
        return connection.scroll(evaluator, batchSize, keepAliveSeconds);
    }
796
    /**
     * Continues a scroll previously started with {@link #scroll(DBSExpressionEvaluator, int, int)}, delegating to the
     * connection.
     *
     * @param scrollId the scroll identifier from a previous scroll result
     * @since 11.1
     */
    public ScrollResult<String> scroll(String scrollId) {
        return connection.scroll(scrollId);
    }
801
    /**
     * Gets the lock state of a document, delegating to the connection.
     *
     * @param id the document id
     * @return whatever the connection returns — presumably the lock, or {@code null} if the document is not locked;
     *         confirm against the connection contract
     * @since 11.1
     */
    @Override
    public Lock getLock(String id) {
        return connection.getLock(id);
    }
807
    /**
     * Sets a lock on a document, delegating to the connection.
     *
     * @param id the document id
     * @param lock the lock to set
     * @return whatever the connection returns — conventionally the pre-existing lock when the document was already
     *         locked, else {@code null}; confirm against the connection contract
     * @since 11.1
     */
    @Override
    public Lock setLock(String id, Lock lock) {
        return connection.setLock(id, lock);
    }
813
    /**
     * Removes a lock from a document, delegating to the connection.
     *
     * @param id the document id
     * @param owner the expected lock owner — semantics (e.g. {@code null} for unconditional removal) are defined by
     *        the connection contract; confirm there
     * @return whatever the connection returns
     * @since 11.1
     */
    @Override
    public Lock removeLock(String id, String owner) {
        return connection.removeLock(id, owner);
    }
819
820    /**
821     * Removes a list of documents.
822     * <p>
823     * Called after a {@link #save} has been done.
824     */
825    public void removeStates(Set<String> ids) {
826        if (undoLog != null) {
827            for (String id : ids) {
828                if (undoLog.containsKey(id)) {
829                    // there's already a create or an update in the undo log
830                    State oldUndo = undoLog.get(id);
831                    if (oldUndo == null) {
832                        // create + delete -> forget
833                        undoLog.remove(id);
834                    } else {
835                        // update + delete -> original old state to re-create
836                        oldUndo.put(KEY_UNDOLOG_CREATE, TRUE);
837                    }
838                } else {
839                    // just delete -> store old state to re-create
840                    State oldState = StateHelper.deepCopy(getStateForRead(id));
841                    oldState.put(KEY_UNDOLOG_CREATE, TRUE);
842                    undoLog.put(id, oldState);
843                }
844            }
845        }
846        for (String id : ids) {
847            transientStates.remove(id);
848        }
849        connection.deleteStates(ids);
850    }
851
    /**
     * Marks a document as having a user-level change, so that {@link #save} will update its change token (when change
     * tokens are enabled on the repository).
     *
     * @param id the document id
     */
    public void markUserChange(String id) {
        userChangeIds.add(id);
    }
855
856    /**
857     * Writes transient state to database.
858     * <p>
859     * An undo log is kept in order to rollback the transaction later if needed.
860     */
861    public void save() {
862        updateProxies();
863        List<Work> works;
864        if (!repository.isFulltextDisabled()) {
865            // TODO getting fulltext already does a getStateChange
866            works = getFulltextWorks();
867        } else {
868            works = Collections.emptyList();
869        }
870        List<State> statesToCreate = new ArrayList<>();
871        for (String id : transientCreated) { // ordered
872            DBSDocumentState docState = transientStates.get(id);
873            docState.setNotDirty();
874            if (undoLog != null) {
875                undoLog.put(id, null); // marker to denote create
876            }
877            State state = docState.getState();
878            state.put(KEY_CHANGE_TOKEN, INITIAL_CHANGE_TOKEN);
879            statesToCreate.add(state);
880        }
881        if (!statesToCreate.isEmpty()) {
882            connection.createStates(statesToCreate);
883        }
884        for (DBSDocumentState docState : transientStates.values()) {
885            String id = docState.getId();
886            if (transientCreated.contains(id)) {
887                continue; // already done
888            }
889            StateDiff diff = docState.getStateChange();
890            if (diff != null) {
891                try {
892                    if (undoLog != null) {
893                        if (!undoLog.containsKey(id)) {
894                            undoLog.put(id, StateHelper.deepCopy(docState.getOriginalState()));
895                        }
896                        // else there's already a create or an update in the undo log so original info is enough
897                    }
898                    ChangeTokenUpdater changeTokenUpdater;
899                    if (repository.isChangeTokenEnabled()) {
900                        // increment system change token
901                        Long base = (Long) docState.get(KEY_SYS_CHANGE_TOKEN);
902                        docState.put(KEY_SYS_CHANGE_TOKEN, DeltaLong.valueOf(base, 1));
903                        diff.put(KEY_SYS_CHANGE_TOKEN, DeltaLong.valueOf(base, 1));
904                        // update change token if applicable (user change)
905                        if (userChangeIds.contains(id)) {
906                            changeTokenUpdater = new ChangeTokenUpdater(docState);
907                        } else {
908                            changeTokenUpdater = null;
909                        }
910                    } else {
911                        changeTokenUpdater = null;
912                    }
913                    connection.updateState(id, diff, changeTokenUpdater);
914                } finally {
915                    docState.setNotDirty();
916                }
917            }
918        }
919        transientCreated.clear();
920        userChangeIds.clear();
921        scheduleWork(works);
922    }
923
924    /**
925     * Logic to get the conditions to use to match and update a change token.
926     * <p>
927     * This may be called several times for a single DBS document update, because the low-level storage may need several
928     * database updates for a single high-level update in some cases.
929     *
930     * @since 9.1
931     */
932    public static class ChangeTokenUpdater {
933
934        protected final DBSDocumentState docState;
935
936        protected Long oldToken;
937
938        public ChangeTokenUpdater(DBSDocumentState docState) {
939            this.docState = docState;
940            oldToken = (Long) docState.getOriginalState().get(KEY_CHANGE_TOKEN);
941        }
942
943        /**
944         * Gets the conditions to use to match a change token.
945         */
946        public Map<String, Serializable> getConditions() {
947            return Map.of(KEY_CHANGE_TOKEN, oldToken);
948        }
949
950        /**
951         * Gets the updates to make to write the updated change token.
952         */
953        public Map<String, Serializable> getUpdates() {
954            Long newToken;
955            if (oldToken == null) {
956                // document without change token, just created
957                newToken = INITIAL_CHANGE_TOKEN;
958            } else {
959                newToken = BaseDocument.updateChangeToken(oldToken);
960            }
961            // also store the new token in the state (without marking dirty), for the next update
962            docState.getState().put(KEY_CHANGE_TOKEN, newToken);
963            oldToken = newToken;
964            return Map.of(KEY_CHANGE_TOKEN, newToken);
965        }
966    }
967
968    protected void applyUndoLog() {
969        Set<String> deletes = new HashSet<>();
970        for (Entry<String, State> es : undoLog.entrySet()) {
971            String id = es.getKey();
972            State state = es.getValue();
973            if (state == null) {
974                deletes.add(id);
975            } else {
976                boolean recreate = state.remove(KEY_UNDOLOG_CREATE) != null;
977                if (recreate) {
978                    connection.createState(state);
979                } else {
980                    // undo update
981                    State currentState = connection.readState(id);
982                    if (currentState != null) {
983                        StateDiff diff = StateHelper.diff(currentState, state);
984                        if (!diff.isEmpty()) {
985                            connection.updateState(id, diff, null);
986                        }
987                    }
988                    // else we expected to read a current state but it was concurrently deleted...
989                    // in that case leave it deleted
990                }
991            }
992        }
993        if (!deletes.isEmpty()) {
994            connection.deleteStates(deletes);
995        }
996    }
997
998    /**
999     * Checks if the changed documents are proxy targets, and updates the proxies if that's the case.
1000     */
1001    protected void updateProxies() {
1002        for (String id : transientCreated) { // ordered
1003            DBSDocumentState docState = transientStates.get(id);
1004            updateProxies(docState);
1005        }
1006        // copy as we may modify proxies
1007        for (String id : transientStates.keySet().toArray(new String[0])) {
1008            DBSDocumentState docState = transientStates.get(id);
1009            if (transientCreated.contains(id)) {
1010                continue; // already done
1011            }
1012            if (docState.isDirty()) {
1013                updateProxies(docState);
1014            }
1015        }
1016    }
1017
1018    protected void updateProxies(DBSDocumentState target) {
1019        Object[] proxyIds = (Object[]) target.get(KEY_PROXY_IDS);
1020        if (proxyIds != null) {
1021            for (Object proxyId : proxyIds) {
1022                try {
1023                    updateProxy(target, (String) proxyId);
1024                } catch (ConcurrentUpdateException e) {
1025                    e.addInfo("On doc " + target.getId());
1026                    log.error(e, e);
1027                    // do not throw, this avoids crashing the session
1028                }
1029            }
1030        }
1031    }
1032
1033    /**
1034     * Updates the state of a proxy based on its target.
1035     */
1036    protected void updateProxy(DBSDocumentState target, String proxyId) {
1037        DBSDocumentState proxy = getStateForUpdate(proxyId);
1038        if (proxy == null) {
1039            log.debug("Proxy " + proxyId + " concurrently deleted");
1040            return;
1041        }
1042        SchemaManager schemaManager = Framework.getService(SchemaManager.class);
1043        // clear all proxy data
1044        for (String key : proxy.getState().keyArray()) {
1045            if (!isProxySpecific(key, schemaManager)) {
1046                proxy.put(key, null);
1047            }
1048        }
1049        // copy from target
1050        for (Entry<String, Serializable> en : target.getState().entrySet()) {
1051            String key = en.getKey();
1052            if (!isProxySpecific(key, schemaManager)) {
1053                proxy.put(key, StateHelper.deepCopy(en.getValue()));
1054            }
1055        }
1056    }
1057
1058    /**
1059     * Things that we don't touch on a proxy when updating it.
1060     */
1061    protected boolean isProxySpecific(String key, SchemaManager schemaManager) {
1062        switch (key) {
1063        // these are placeful stuff
1064        case KEY_SYS_CHANGE_TOKEN:
1065        case KEY_CHANGE_TOKEN:
1066        case KEY_ID:
1067        case KEY_PARENT_ID:
1068        case KEY_ANCESTOR_IDS:
1069        case KEY_NAME:
1070        case KEY_POS:
1071        case KEY_ACP:
1072        case KEY_READ_ACL:
1073            // these are proxy-specific
1074        case KEY_IS_PROXY:
1075        case KEY_PROXY_TARGET_ID:
1076        case KEY_PROXY_VERSION_SERIES_ID:
1077        case KEY_IS_VERSION:
1078        case KEY_PROXY_IDS:
1079            return true;
1080        }
1081        int p = key.indexOf(':');
1082        if (p == -1) {
1083            // no prefix, assume not proxy-specific
1084            return false;
1085        }
1086        String prefix = key.substring(0, p);
1087        Schema schema = schemaManager.getSchemaFromPrefix(prefix);
1088        if (schema == null) {
1089            schema = schemaManager.getSchema(prefix);
1090            if (schema == null) {
1091                // unknown prefix, assume not proxy-specific
1092                return false;
1093            }
1094        }
1095        return schemaManager.isProxySchema(schema.getName(), null); // type unused
1096    }
1097
1098    /**
1099     * Called when created in a transaction.
1100     *
1101     * @since 7.4
1102     */
1103    public void begin() {
1104        if (!repository.supportsTransactions()) {
1105            if (undoLog != null) {
1106                throw new NuxeoException("Transaction already started");
1107            }
1108            undoLog = new HashMap<>();
1109        }
1110        connection.begin();
1111    }
1112
1113    /**
1114     * Saves and flushes to database.
1115     */
1116    public void commit() {
1117        save();
1118        // clear transient, this means that after this references to states will be stale
1119        // TODO mark states as invalid
1120        clearTransient();
1121        // the transaction ended, the proxied DBSSession will disappear and cannot be reused anyway
1122        if (undoLog != null) {
1123            undoLog = null;
1124        }
1125        connection.commit();
1126    }
1127
1128    /**
1129     * Rolls back the save state by applying the undo log.
1130     */
1131    public void rollback() {
1132        clearTransient();
1133        if (undoLog != null) {
1134            applyUndoLog();
1135            // the transaction ended, the proxied DBSSession will disappear and cannot be reused anyway
1136            undoLog = null;
1137        }
1138        connection.rollback();
1139    }
1140
    /** Discards all transient (unsaved) document states and the pending-creation id list. */
    protected void clearTransient() {
        transientStates.clear();
        transientCreated.clear();
    }
1145
1146    /**
1147     * Gets the fulltext updates to do. Called at save() time.
1148     *
1149     * @return a list of {@link Work} instances to schedule post-commit.
1150     */
1151    protected List<Work> getFulltextWorks() {
1152        Set<String> docsWithDirtyStrings = new HashSet<>();
1153        Set<String> docsWithDirtyBinaries = new HashSet<>();
1154        findDirtyDocuments(docsWithDirtyStrings, docsWithDirtyBinaries);
1155        if (repository.getFulltextConfiguration().fulltextSearchDisabled) {
1156            // We only need to update dirty simple strings if fulltext search is not disabled
1157            // because in that case Elasticsearch will do its own extraction/indexing.
1158            // We need to detect dirty binary strings in all cases, because Elasticsearch
1159            // will need them even if the repository itself doesn't use them for search.
1160            docsWithDirtyStrings = Collections.emptySet();
1161        }
1162        Set<String> dirtyIds = new HashSet<>();
1163        dirtyIds.addAll(docsWithDirtyStrings);
1164        dirtyIds.addAll(docsWithDirtyBinaries);
1165        if (dirtyIds.isEmpty()) {
1166            return Collections.emptyList();
1167        }
1168        markIndexingInProgress(dirtyIds);
1169        List<Work> works = new ArrayList<>(dirtyIds.size());
1170        for (String id : dirtyIds) {
1171            boolean updateSimpleText = docsWithDirtyStrings.contains(id);
1172            boolean updateBinaryText = docsWithDirtyBinaries.contains(id);
1173            Work work = new FulltextExtractorWork(repository.getName(), id, updateSimpleText, updateBinaryText, true);
1174            works.add(work);
1175        }
1176        return works;
1177    }
1178
1179    protected void markIndexingInProgress(Set<String> ids) {
1180        FulltextConfiguration fulltextConfiguration = repository.getFulltextConfiguration();
1181        for (DBSDocumentState docState : getStatesForUpdate(ids)) {
1182            if (!fulltextConfiguration.isFulltextIndexable(docState.getPrimaryType())) {
1183                continue;
1184            }
1185            docState.put(KEY_FULLTEXT_JOBID, docState.getId());
1186        }
1187    }
1188
1189    /**
1190     * Finds the documents having dirty text or dirty binaries that have to be reindexed as fulltext.
1191     *
1192     * @param docsWithDirtyStrings set of ids, updated by this method
1193     * @param docsWithDirtyBinaries set of ids, updated by this method
1194     */
1195    protected void findDirtyDocuments(Set<String> docsWithDirtyStrings, Set<String> docsWithDirtyBinaries) {
1196        for (DBSDocumentState docState : transientStates.values()) {
1197            State originalState = docState.getOriginalState();
1198            State state = docState.getState();
1199            if (originalState == state) {
1200                continue;
1201            }
1202            StateDiff diff = StateHelper.diff(originalState, state);
1203            if (diff.isEmpty()) {
1204                continue;
1205            }
1206            StateDiff rdiff = StateHelper.diff(state, originalState);
1207            // we do diffs in both directions to capture removal of complex list elements,
1208            // for instance for {foo: [{bar: baz}] -> {foo: []}
1209            // diff paths = foo and rdiff paths = foo/*/bar
1210            Set<String> paths = new HashSet<>();
1211            DirtyPathsFinder dirtyPathsFinder = new DirtyPathsFinder(paths);
1212            dirtyPathsFinder.findDirtyPaths(diff);
1213            dirtyPathsFinder.findDirtyPaths(rdiff);
1214            FulltextConfiguration fulltextConfiguration = repository.getFulltextConfiguration();
1215            boolean dirtyStrings = false;
1216            boolean dirtyBinaries = false;
1217            for (String path : paths) {
1218                Set<String> indexesSimple = fulltextConfiguration.indexesByPropPathSimple.get(path);
1219                if (indexesSimple != null && !indexesSimple.isEmpty()) {
1220                    dirtyStrings = true;
1221                    if (dirtyBinaries) {
1222                        break;
1223                    }
1224                }
1225                Set<String> indexesBinary = fulltextConfiguration.indexesByPropPathBinary.get(path);
1226                if (indexesBinary != null && !indexesBinary.isEmpty()) {
1227                    dirtyBinaries = true;
1228                    if (dirtyStrings) {
1229                        break;
1230                    }
1231                }
1232            }
1233            if (dirtyStrings) {
1234                docsWithDirtyStrings.add(docState.getId());
1235            }
1236            if (dirtyBinaries) {
1237                docsWithDirtyBinaries.add(docState.getId());
1238            }
1239        }
1240    }
1241
1242    /**
1243     * Iterates on a state diff to find the paths corresponding to dirty values.
1244     *
1245     * @since 7.10-HF04, 8.1
1246     */
1247    protected static class DirtyPathsFinder {
1248
1249        protected Set<String> paths;
1250
1251        public DirtyPathsFinder(Set<String> paths) {
1252            this.paths = paths;
1253        }
1254
1255        public void findDirtyPaths(StateDiff value) {
1256            findDirtyPaths(value, null);
1257        }
1258
1259        protected void findDirtyPaths(Object value, String path) {
1260            if (value instanceof Object[]) {
1261                findDirtyPaths((Object[]) value, path);
1262            } else if (value instanceof List) {
1263                findDirtyPaths((List<?>) value, path);
1264            } else if (value instanceof ListDiff) {
1265                findDirtyPaths((ListDiff) value, path);
1266            } else if (value instanceof State) {
1267                findDirtyPaths((State) value, path);
1268            } else {
1269                paths.add(path);
1270            }
1271        }
1272
1273        protected void findDirtyPaths(Object[] value, String path) {
1274            String newPath = path + "/*";
1275            for (Object v : value) {
1276                findDirtyPaths(v, newPath);
1277            }
1278        }
1279
1280        protected void findDirtyPaths(List<?> value, String path) {
1281            String newPath = path + "/*";
1282            for (Object v : value) {
1283                findDirtyPaths(v, newPath);
1284            }
1285        }
1286
1287        protected void findDirtyPaths(ListDiff value, String path) {
1288            String newPath = path;
1289            if (value.diff != null) {
1290                findDirtyPaths(value.diff, newPath);
1291            }
1292            if (value.rpush != null) {
1293                findDirtyPaths(value.rpush, newPath);
1294            }
1295            if (value.pull != null) {
1296                findDirtyPaths(value.pull, newPath);
1297            }
1298        }
1299
1300        protected void findDirtyPaths(State value, String path) {
1301            for (Entry<String, Serializable> es : value.entrySet()) {
1302                String key = es.getKey();
1303                Serializable v = es.getValue();
1304                String newPath = path == null ? key : path + "/" + key;
1305                findDirtyPaths(v, newPath);
1306            }
1307        }
1308    }
1309
1310    protected void scheduleWork(List<Work> works) {
1311        // do async fulltext indexing only if high-level sessions are available
1312        RepositoryManager repositoryManager = Framework.getService(RepositoryManager.class);
1313        if (repositoryManager != null && !works.isEmpty()) {
1314            WorkManager workManager = Framework.getService(WorkManager.class);
1315            for (Work work : works) {
1316                // schedule work post-commit
1317                // in non-tx mode, this may execute it nearly immediately
1318                workManager.schedule(work, Scheduling.IF_NOT_SCHEDULED, true);
1319            }
1320        }
1321    }
1322
1323}