001/*
002 * Copyright (c) 2006-2013 Nuxeo SA (http://nuxeo.com/) and others.
003 *
004 * All rights reserved. This program and the accompanying materials
005 * are made available under the terms of the Eclipse Public License v1.0
006 * which accompanies this distribution, and is available at
007 * http://www.eclipse.org/legal/epl-v10.html
008 *
009 * Contributors:
010 *     Florent Guillaume
011 */
012package org.nuxeo.ecm.core.storage.sql;
013
014import java.io.Serializable;
015import java.text.Normalizer;
016import java.util.ArrayList;
017import java.util.Arrays;
018import java.util.Calendar;
019import java.util.Collection;
020import java.util.Collections;
021import java.util.HashMap;
022import java.util.HashSet;
023import java.util.Iterator;
024import java.util.LinkedList;
025import java.util.List;
026import java.util.Map;
027import java.util.Map.Entry;
028import java.util.Set;
029
030import javax.resource.ResourceException;
031import javax.resource.cci.ConnectionMetaData;
032import javax.resource.cci.Interaction;
033import javax.resource.cci.LocalTransaction;
034import javax.resource.cci.ResultSetInfo;
035import javax.transaction.xa.XAException;
036import javax.transaction.xa.XAResource;
037import javax.transaction.xa.Xid;
038
039import org.apache.commons.lang.StringUtils;
040import org.apache.commons.logging.Log;
041import org.apache.commons.logging.LogFactory;
042import org.nuxeo.ecm.core.api.ConcurrentUpdateException;
043import org.nuxeo.ecm.core.api.IterableQueryResult;
044import org.nuxeo.ecm.core.api.NuxeoException;
045import org.nuxeo.ecm.core.api.PartialList;
046import org.nuxeo.ecm.core.api.repository.RepositoryManager;
047import org.nuxeo.ecm.core.api.security.ACL;
048import org.nuxeo.ecm.core.api.security.SecurityConstants;
049import org.nuxeo.ecm.core.model.LockManager;
050import org.nuxeo.ecm.core.query.QueryFilter;
051import org.nuxeo.ecm.core.query.sql.NXQL;
052import org.nuxeo.ecm.core.storage.FulltextParser;
053import org.nuxeo.ecm.core.storage.FulltextUpdaterWork;
054import org.nuxeo.ecm.core.storage.FulltextUpdaterWork.IndexAndText;
055import org.nuxeo.ecm.core.storage.sql.PersistenceContext.PathAndId;
056import org.nuxeo.ecm.core.storage.sql.RowMapper.RowBatch;
057import org.nuxeo.ecm.core.storage.sql.coremodel.SQLFulltextExtractorWork;
058import org.nuxeo.ecm.core.work.api.Work;
059import org.nuxeo.ecm.core.work.api.WorkManager;
060import org.nuxeo.ecm.core.work.api.WorkManager.Scheduling;
061import org.nuxeo.runtime.api.Framework;
062import org.nuxeo.runtime.metrics.MetricsService;
063import org.nuxeo.runtime.transaction.TransactionHelper;
064
065import com.codahale.metrics.MetricRegistry;
066import com.codahale.metrics.SharedMetricRegistries;
067import com.codahale.metrics.Timer;
068
069/**
070 * The session is the main high level access point to data from the underlying database.
071 */
072public class SessionImpl implements Session, XAResource {
073
    private static final Log log = LogFactory.getLog(SessionImpl.class);

    /**
     * Set this system property to false if you don't want repositories to be looked up under the compatibility name
     * "default" in the "repositories" table.
     * <p>
     * Only do this if you start from an empty database, or if you have migrated the "repositories" table by hand, or if
     * you need to create a new repository in a database already containing a "default" repository (table sharing, not
     * recommended).
     */
    public static final String COMPAT_REPOSITORY_NAME_KEY = "org.nuxeo.vcs.repository.name.default.compat";

    // resolved once at class load time; defaults to true for backward compatibility
    private static final boolean COMPAT_REPOSITORY_NAME = Boolean.parseBoolean(Framework.getProperty(
            COMPAT_REPOSITORY_NAME_KEY, "true"));

    /** The repository this session belongs to. */
    protected final RepositoryImpl repository;

    /** Low-level row mapper giving database access; closed with the session. */
    private final Mapper mapper;

    /** The repository's schema/type model. */
    private final Model model;

    /** Parser used to extract fulltext from string properties at save() time. */
    protected final FulltextParser fulltextParser;

    // public because used by unit tests
    public final PersistenceContext context;

    // volatile: liveness may be observed from another thread
    private volatile boolean live;

    // true while a transaction is in progress; guards clearCaches()
    private boolean inTransaction;

    private Node rootNode;

    // owner-thread bookkeeping for concurrency diagnostics; threadId == 0 means no owner recorded
    private long threadId;

    private String threadName;

    // owner's stack trace, captured only when debug logging is enabled (see checkThreadStart)
    private Throwable threadStack;

    // set when ACLs changed, triggering updateReadAcls() at flush time
    private boolean readAclsChanged;

    // @since 5.7
    protected final MetricRegistry registry = SharedMetricRegistries.getOrCreate(MetricsService.class.getName());

    private final Timer saveTimer;

    private final Timer queryTimer;

    private final Timer aclrUpdateTimer;
122
123    private static final java.lang.String LOG_MIN_DURATION_KEY = "org.nuxeo.vcs.query.log_min_duration_ms";
124
125    private static final long LOG_MIN_DURATION_NS = Long.parseLong(Framework.getProperty(LOG_MIN_DURATION_KEY, "-1")) * 1000000;
126
127    public SessionImpl(RepositoryImpl repository, Model model, Mapper mapper) {
128        this.repository = repository;
129        this.mapper = mapper;
130        this.model = model;
131        context = new PersistenceContext(model, mapper, this);
132        live = true;
133        readAclsChanged = false;
134
135        try {
136            fulltextParser = repository.fulltextParserClass.newInstance();
137        } catch (ReflectiveOperationException e) {
138            throw new NuxeoException(e);
139        }
140        saveTimer = registry.timer(MetricRegistry.name("nuxeo", "repositories", repository.getName(), "saves"));
141        queryTimer = registry.timer(MetricRegistry.name("nuxeo", "repositories", repository.getName(), "queries"));
142        aclrUpdateTimer = registry.timer(MetricRegistry.name("nuxeo", "repositories", repository.getName(),
143                "aclr-updates"));
144
145        computeRootNode();
146    }
147
148    public void checkLive() {
149        if (!live) {
150            throw new IllegalStateException("Session is not live");
151        }
152        checkThread();
153    }
154
    // called by NetServlet when forwarding remote NetMapper calls.
    /** Returns the low-level mapper backing this session. */
    @Override
    public Mapper getMapper() {
        return mapper;
    }
160
161    /**
162     * Gets the XAResource. Called by the ManagedConnectionImpl, which actually wraps it in a connection-aware
163     * implementation.
164     */
165    public XAResource getXAResource() {
166        return this;
167    }
168
169    /**
170     * Clears all the caches. Called by RepositoryManagement.
171     */
172    protected int clearCaches() {
173        if (inTransaction) {
174            // avoid potential multi-threaded access to active session
175            return 0;
176        }
177        checkThreadEnd();
178        return context.clearCaches();
179    }
180
    /** Returns the persistence context of this session. */
    protected PersistenceContext getContext() {
        return context;
    }
184
    /** Called on rollback: drops the session's cached state by clearing the context caches. */
    protected void rollback() {
        context.clearCaches();
    }
188
189    protected void checkThread() {
190        if (threadId == 0) {
191            return;
192        }
193        long currentThreadId = Thread.currentThread().getId();
194        if (threadId == currentThreadId) {
195            return;
196        }
197        String currentThreadName = Thread.currentThread().getName();
198        String msg = String.format("Concurrency Error: Session was started in thread %s (%s)"
199                + " but is being used in thread %s (%s)", threadId, threadName, currentThreadId, currentThreadName);
200        throw new IllegalStateException(msg, threadStack);
201    }
202
203    protected void checkThreadStart() {
204        threadId = Thread.currentThread().getId();
205        threadName = Thread.currentThread().getName();
206        if (log.isDebugEnabled()) {
207            threadStack = new Throwable("owner stack trace");
208        }
209    }
210
    /** Forgets the recorded owner thread (the session is no longer bound to a thread). */
    protected void checkThreadEnd() {
        threadId = 0;
        threadName = null;
        threadStack = null;
    }
216
217    /**
218     * Generates a new id, or used a pre-generated one (import).
219     */
220    protected Serializable generateNewId(Serializable id) {
221        return context.generateNewId(id);
222    }
223
    // delegates to the persistence context; presumably tells whether the id was newly created in this
    // session (not yet saved) — see PersistenceContext#isIdNew
    protected boolean isIdNew(Serializable id) {
        return context.isIdNew(id);
    }
227
228    /*
229     * ----- javax.resource.cci.Connection -----
230     */
231
    /**
     * Closes the session: clears its state, closes the mapper, and unregisters from the repository.
     *
     * @throws ResourceException wrapping any failure, including use of an already-closed session
     */
    @Override
    public void close() throws ResourceException {
        try {
            checkLive();
            closeSession();
            repository.closeSession(this);
        } catch (Exception cause) {
            throw new ResourceException(cause);
        }
    }
242
    /** Marks the session dead, clears the context caches, and closes the mapper (and therefore the connection). */
    protected void closeSession() {
        live = false;
        context.clearCaches();
        // close the mapper and therefore the connection
        mapper.close();
        // don't clean the caches, we keep the pristine cache around
        // TODO this is getting destroyed, we can clean everything
    }
251
    /** Not supported by this CCI connection implementation. */
    @Override
    public Interaction createInteraction() throws ResourceException {
        throw new UnsupportedOperationException();
    }

    /** Not supported by this CCI connection implementation. */
    @Override
    public LocalTransaction getLocalTransaction() throws ResourceException {
        throw new UnsupportedOperationException();
    }

    /** Not supported by this CCI connection implementation. */
    @Override
    public ConnectionMetaData getMetaData() throws ResourceException {
        throw new UnsupportedOperationException();
    }

    /** Not supported by this CCI connection implementation. */
    @Override
    public ResultSetInfo getResultSetInfo() throws ResourceException {
        throw new UnsupportedOperationException();
    }
271
272    /*
273     * ----- Session -----
274     */
275
    /** Returns whether this session has not been closed. */
    @Override
    public boolean isLive() {
        return live;
    }

    @Override
    public boolean isStateSharedByAllThreadSessions() {
        // only the JCA handle returns true
        return false;
    }

    /** Returns the name of the underlying repository. */
    @Override
    public String getRepositoryName() {
        return repository.getName();
    }

    /** Returns the repository's schema/type model. */
    @Override
    public Model getModel() {
        return model;
    }

    /** Returns the repository root node; fails if the session is not live. */
    @Override
    public Node getRootNode() {
        checkLive();
        return rootNode;
    }
302
303    @Override
304    public void save() {
305        final Timer.Context timerContext = saveTimer.time();
306        try {
307            checkLive();
308            flush();
309            if (!inTransaction) {
310                sendInvalidationsToOthers();
311                // as we don't have a way to know when the next
312                // non-transactional
313                // statement will start, process invalidations immediately
314            }
315            processReceivedInvalidations();
316        } finally {
317            timerContext.stop();
318        }
319    }
320
321    protected void flush() {
322        checkThread();
323        List<Work> works;
324        if (!repository.getRepositoryDescriptor().getFulltextDisabled()) {
325            works = getFulltextWorks();
326        } else {
327            works = Collections.emptyList();
328        }
329        doFlush();
330        if (readAclsChanged) {
331            updateReadAcls();
332        }
333        scheduleWork(works);
334        checkInvalidationsConflict();
335    }
336
337    protected void scheduleWork(List<Work> works) {
338        // do async fulltext indexing only if high-level sessions are available
339        RepositoryManager repositoryManager = Framework.getLocalService(RepositoryManager.class);
340        if (repositoryManager != null && !works.isEmpty()) {
341            WorkManager workManager = Framework.getLocalService(WorkManager.class);
342            for (Work work : works) {
343                // schedule work post-commit
344                // in non-tx mode, this may execute it nearly immediately
345                workManager.schedule(work, Scheduling.IF_NOT_SCHEDULED, true);
346            }
347        }
348    }
349
350    protected void doFlush() {
351        List<Fragment> fragmentsToClearDirty = new ArrayList<>(0);
352        RowBatch batch = context.getSaveBatch(fragmentsToClearDirty);
353        if (!batch.isEmpty()) {
354            log.debug("Saving session");
355            // execute the batch
356            mapper.write(batch);
357            log.debug("End of save");
358            for (Fragment fragment : fragmentsToClearDirty) {
359                fragment.clearDirty();
360            }
361        }
362    }
363
    /** Returns the id of the document containing the given node id, as resolved by the persistence context. */
    protected Serializable getContainingDocument(Serializable id) {
        return context.getContainingDocument(id);
    }
367
368    /**
369     * Gets the fulltext updates to do. Called at save() time.
370     *
371     * @return a list of {@link Work} instances to schedule post-commit.
372     */
373    protected List<Work> getFulltextWorks() {
374        Set<Serializable> dirtyStrings = new HashSet<Serializable>();
375        Set<Serializable> dirtyBinaries = new HashSet<Serializable>();
376        context.findDirtyDocuments(dirtyStrings, dirtyBinaries);
377        if (dirtyStrings.isEmpty() && dirtyBinaries.isEmpty()) {
378            return Collections.emptyList();
379        }
380
381        List<Work> works = new LinkedList<Work>();
382        getFulltextSimpleWorks(works, dirtyStrings);
383        getFulltextBinariesWorks(works, dirtyBinaries);
384        return works;
385    }
386
    /**
     * Creates fulltext updater works for documents whose string properties are dirty.
     * <p>
     * For each dirty, indexable, non-proxy document: sets the fulltext job id on the document, then computes for every
     * configured fulltext index the simple text to store and accumulates a corresponding {@link FulltextUpdaterWork}.
     *
     * @param works the accumulator for works to schedule post-commit
     * @param dirtyStrings the ids of documents with dirty string properties
     */
    protected void getFulltextSimpleWorks(List<Work> works, Set<Serializable> dirtyStrings) {
        // update simpletext on documents with dirty strings
        for (Serializable docId : dirtyStrings) {
            if (docId == null) {
                // cannot happen, but has been observed :(
                log.error("Got null doc id in fulltext update, cannot happen");
                continue;
            }
            Node document = getNodeById(docId);
            if (document == null) {
                // cannot happen
                continue;
            }
            if (document.isProxy()) {
                // proxies don't have any fulltext attached, it's
                // the target document that carries it
                continue;
            }
            String documentType = document.getPrimaryType();
            String[] mixinTypes = document.getMixinTypes();

            if (!model.getFulltextConfiguration().isFulltextIndexable(documentType)) {
                continue;
            }
            // mark extraction in progress on the document itself
            document.getSimpleProperty(Model.FULLTEXT_JOBID_PROP).setValue(model.idToString(document.getId()));
            FulltextFinder fulltextFinder = new FulltextFinder(fulltextParser, document, this);
            List<IndexAndText> indexesAndText = new LinkedList<IndexAndText>();
            for (String indexName : model.getFulltextConfiguration().indexNames) {
                Set<String> paths;
                if (model.getFulltextConfiguration().indexesAllSimple.contains(indexName)) {
                    // index all string fields, minus excluded ones
                    // TODO XXX excluded ones...
                    paths = model.getSimpleTextPropertyPaths(documentType, mixinTypes);
                } else {
                    // index configured fields
                    paths = model.getFulltextConfiguration().propPathsByIndexSimple.get(indexName);
                }
                String text = fulltextFinder.findFulltext(paths);
                indexesAndText.add(new IndexAndText(indexName, text));
            }
            if (!indexesAndText.isEmpty()) {
                // schedule the write of the computed simple text
                Work work = new FulltextUpdaterWork(repository.getName(), model.idToString(docId), true, false,
                        indexesAndText);
                works.add(work);
            }
        }
    }
434
435    protected void getFulltextBinariesWorks(List<Work> works, final Set<Serializable> dirtyBinaries) {
436        if (dirtyBinaries.isEmpty()) {
437            return;
438        }
439
440        // mark indexing in progress, so that future copies (including versions)
441        // will be indexed as well
442        for (Node node : getNodesByIds(new ArrayList<Serializable>(dirtyBinaries))) {
443            if (!model.getFulltextConfiguration().isFulltextIndexable(node.getPrimaryType())) {
444                continue;
445            }
446            node.getSimpleProperty(Model.FULLTEXT_JOBID_PROP).setValue(model.idToString(node.getId()));
447        }
448
449        // FulltextExtractorWork does fulltext extraction using converters
450        // and then schedules a FulltextUpdaterWork to write the results
451        // single-threaded
452        for (Serializable id : dirtyBinaries) {
453            String docId = model.idToString(id);
454            Work work = new SQLFulltextExtractorWork(repository.getName(), docId);
455            works.add(work);
456        }
457    }
458
459    /**
460     * Finds the fulltext in a document and sends it to a fulltext parser.
461     *
462     * @since 5.9.5
463     */
464    protected static class FulltextFinder {
465
466        protected final FulltextParser fulltextParser;
467
468        protected final Node document;
469
470        protected final SessionImpl session;
471
472        protected final String documentType;
473
474        protected final String[] mixinTypes;
475
476        public FulltextFinder(FulltextParser fulltextParser, Node document, SessionImpl session) {
477            this.fulltextParser = fulltextParser;
478            this.document = document;
479            this.session = session;
480            if (document == null) {
481                documentType = null;
482                mixinTypes = null;
483            } else { // null in tests
484                documentType = document.getPrimaryType();
485                mixinTypes = document.getMixinTypes();
486            }
487        }
488
489        /**
490         * Parses the document for one index.
491         */
492        protected String findFulltext(Set<String> paths) {
493            if (paths == null) {
494                return "";
495            }
496            List<String> strings = new ArrayList<String>();
497
498            for (String path : paths) {
499                ModelProperty pi = session.getModel().getPathPropertyInfo(documentType, mixinTypes, path);
500                if (pi == null) {
501                    continue; // doc type doesn't have this property
502                }
503                if (pi.propertyType != PropertyType.STRING && pi.propertyType != PropertyType.ARRAY_STRING) {
504                    continue;
505                }
506
507                List<Node> nodes = new ArrayList<Node>(Collections.singleton(document));
508
509                String[] names = path.split("/");
510                for (int i = 0; i < names.length; i++) {
511                    String name = names[i];
512                    if (i < names.length - 1) {
513                        // traverse
514                        List<Node> newNodes;
515                        if ("*".equals(names[i + 1])) {
516                            // traverse complex list
517                            i++;
518                            newNodes = new ArrayList<Node>();
519                            for (Node node : nodes) {
520                                newNodes.addAll(session.getChildren(node, name, true));
521                            }
522                        } else {
523                            // traverse child
524                            newNodes = new ArrayList<Node>(nodes.size());
525                            for (Node node : nodes) {
526                                node = session.getChildNode(node, name, true);
527                                if (node != null) {
528                                    newNodes.add(node);
529                                }
530                            }
531                        }
532                        nodes = newNodes;
533                    } else {
534                        // last path component: get value
535                        for (Node node : nodes) {
536                            if (pi.propertyType == PropertyType.STRING) {
537                                String v = node.getSimpleProperty(name).getString();
538                                if (v != null) {
539                                    fulltextParser.parse(v, path, strings);
540                                }
541                            } else { /* ARRAY_STRING */
542                                for (Serializable v : node.getCollectionProperty(name).getValue()) {
543                                    if (v != null) {
544                                        fulltextParser.parse((String) v, path, strings);
545                                    }
546                                }
547                            }
548                        }
549                    }
550                }
551            }
552            return StringUtils.join(strings, ' ');
553        }
554    }
555
556    /**
557     * Post-transaction invalidations notification.
558     * <p>
559     * Called post-transaction by session commit/rollback or transactionless save.
560     */
561    protected void sendInvalidationsToOthers() {
562        context.sendInvalidationsToOthers();
563    }
564
565    /**
566     * Processes all invalidations accumulated.
567     * <p>
568     * Called pre-transaction by start or transactionless save;
569     */
570    protected void processReceivedInvalidations() {
571        context.processReceivedInvalidations();
572    }
573
574    /**
575     * Post transaction check invalidations processing.
576     */
577    protected void checkInvalidationsConflict() {
578        // repository.receiveClusterInvalidations(this);
579        context.checkInvalidationsConflict();
580    }
581
582    /*
583     * -------------------------------------------------------------
584     * -------------------------------------------------------------
585     * -------------------------------------------------------------
586     */
587
588    protected Node getNodeById(Serializable id, boolean prefetch) {
589        List<Node> nodes = getNodesByIds(Collections.singletonList(id), prefetch);
590        Node node = nodes.get(0);
591        // ((JDBCMapper) ((CachingMapper)
592        // mapper).mapper).logger.log("getNodeById " + id + " -> " + (node ==
593        // null ? "missing" : "found"));
594        return node;
595    }
596
    /**
     * Gets a node by id, with prefetch.
     *
     * @param id the node id, never {@code null}
     * @return the node, or {@code null} if deleted/absent
     * @throws IllegalArgumentException if {@code id} is {@code null}
     */
    @Override
    public Node getNodeById(Serializable id) {
        checkLive();
        if (id == null) {
            throw new IllegalArgumentException("Illegal null id");
        }
        return getNodeById(id, true);
    }
605
    /**
     * Fetches several nodes by ids, optionally bulk-prefetching their related fragments.
     * <p>
     * The returned list has the same order as {@code ids}; an element is {@code null} when the corresponding node is
     * deleted or absent.
     *
     * @param ids the node ids
     * @param prefetch whether to bulk-fetch the fragments configured for prefetch on each node's type (including the
     *            hierarchy fragments of proxy targets)
     * @return the nodes, in the same order as {@code ids}
     */
    public List<Node> getNodesByIds(List<Serializable> ids, boolean prefetch) {
        // get hier fragments
        List<RowId> hierRowIds = new ArrayList<RowId>(ids.size());
        for (Serializable id : ids) {
            hierRowIds.add(new RowId(Model.HIER_TABLE_NAME, id));
        }

        List<Fragment> hierFragments = context.getMulti(hierRowIds, false);

        // find available paths
        Map<Serializable, String> paths = new HashMap<Serializable, String>();
        Set<Serializable> parentIds = new HashSet<Serializable>();
        for (Fragment fragment : hierFragments) {
            Serializable id = fragment.getId();
            PathAndId pathOrId = context.getPathOrMissingParentId((SimpleFragment) fragment, false);
            // find missing fragments
            if (pathOrId.path != null) {
                paths.put(id, pathOrId.path);
            } else {
                // path could not be resolved yet: remember the missing parent
                parentIds.add(pathOrId.id);
            }
        }
        // fetch the missing parents and their ancestors in bulk
        if (!parentIds.isEmpty()) {
            // fetch them in the context
            getHierarchyAndAncestors(parentIds);
            // compute missing paths using context
            for (Fragment fragment : hierFragments) {
                Serializable id = fragment.getId();
                if (paths.containsKey(id)) {
                    continue;
                }
                String path = context.getPath((SimpleFragment) fragment);
                paths.put(id, path);
            }
        }

        // prepare fragment groups to build nodes
        Map<Serializable, FragmentGroup> fragmentGroups = new HashMap<Serializable, FragmentGroup>(ids.size());
        for (Fragment fragment : hierFragments) {
            Serializable id = fragment.row.id;
            fragmentGroups.put(id, new FragmentGroup((SimpleFragment) fragment, new FragmentsMap()));
        }

        if (prefetch) {
            List<RowId> bulkRowIds = new ArrayList<RowId>();
            Set<Serializable> proxyIds = new HashSet<Serializable>();

            // get rows to prefetch for hier fragments
            for (Fragment fragment : hierFragments) {
                findPrefetchedFragments((SimpleFragment) fragment, bulkRowIds, proxyIds);
            }

            // proxies

            // get proxies fragments
            List<RowId> proxiesRowIds = new ArrayList<RowId>(proxyIds.size());
            for (Serializable id : proxyIds) {
                proxiesRowIds.add(new RowId(Model.PROXY_TABLE_NAME, id));
            }
            List<Fragment> proxiesFragments = context.getMulti(proxiesRowIds, true);
            Set<Serializable> targetIds = new HashSet<Serializable>();
            for (Fragment fragment : proxiesFragments) {
                Serializable targetId = ((SimpleFragment) fragment).get(Model.PROXY_TARGET_KEY);
                targetIds.add(targetId);
            }

            // get hier fragments for proxies' targets
            targetIds.removeAll(ids); // only those we don't have already
            hierRowIds = new ArrayList<RowId>(targetIds.size());
            for (Serializable id : targetIds) {
                hierRowIds.add(new RowId(Model.HIER_TABLE_NAME, id));
            }
            hierFragments = context.getMulti(hierRowIds, true);
            for (Fragment fragment : hierFragments) {
                // null proxyIds: targets of proxies are not proxies themselves here
                findPrefetchedFragments((SimpleFragment) fragment, bulkRowIds, null);
            }

            // we have everything to be prefetched

            // fetch all the prefetches in bulk
            List<Fragment> fragments = context.getMulti(bulkRowIds, true);

            // put each fragment in the map of the proper group
            for (Fragment fragment : fragments) {
                FragmentGroup fragmentGroup = fragmentGroups.get(fragment.row.id);
                if (fragmentGroup != null) {
                    fragmentGroup.fragments.put(fragment.row.tableName, fragment);
                }
            }
        }

        // assemble nodes from the fragment groups
        List<Node> nodes = new ArrayList<Node>(ids.size());
        for (Serializable id : ids) {
            FragmentGroup fragmentGroup = fragmentGroups.get(id);
            // null if deleted/absent
            Node node = fragmentGroup == null ? null : new Node(context, fragmentGroup, paths.get(id));
            nodes.add(node);
        }

        return nodes;
    }
709
710    /**
711     * Finds prefetched fragments for a hierarchy fragment, takes note of the ones that are proxies.
712     */
713    protected void findPrefetchedFragments(SimpleFragment hierFragment, List<RowId> bulkRowIds,
714            Set<Serializable> proxyIds) {
715        Serializable id = hierFragment.row.id;
716
717        // find type
718        String typeName = (String) hierFragment.get(Model.MAIN_PRIMARY_TYPE_KEY);
719        if (Model.PROXY_TYPE.equals(typeName)) {
720            if (proxyIds != null) {
721                proxyIds.add(id);
722            }
723            return;
724        }
725
726        // find table names
727        Set<String> tableNames = model.getTypePrefetchedFragments(typeName);
728        if (tableNames == null) {
729            return; // unknown (obsolete) type
730        }
731
732        // add row id for each table name
733        Serializable parentId = hierFragment.get(Model.HIER_PARENT_KEY);
734        for (String tableName : tableNames) {
735            if (Model.HIER_TABLE_NAME.equals(tableName)) {
736                continue; // already fetched
737            }
738            if (parentId != null && Model.VERSION_TABLE_NAME.equals(tableName)) {
739                continue; // not a version, don't fetch this table
740                // TODO incorrect if we have filed versions
741            }
742            bulkRowIds.add(new RowId(tableName, id));
743        }
744    }
745
    /**
     * Gets several nodes by id, with prefetch; the result order matches {@code ids}, with {@code null} entries for
     * deleted/absent nodes.
     */
    @Override
    public List<Node> getNodesByIds(List<Serializable> ids) {
        checkLive();
        return getNodesByIds(ids, true);
    }
751
752    @Override
753    public Node getParentNode(Node node) {
754        checkLive();
755        if (node == null) {
756            throw new IllegalArgumentException("Illegal null node");
757        }
758        Serializable id = node.getHierFragment().get(Model.HIER_PARENT_KEY);
759        return id == null ? null : getNodeById(id);
760    }
761
762    @Override
763    public String getPath(Node node) {
764        checkLive();
765        String path = node.getPath();
766        if (path == null) {
767            path = context.getPath(node.getHierFragment());
768        }
769        return path;
770    }
771
772    /*
773     * Normalize using NFC to avoid decomposed characters (like 'e' + COMBINING ACUTE ACCENT instead of LATIN SMALL
774     * LETTER E WITH ACUTE). NFKC (normalization using compatibility decomposition) is not used, because compatibility
775     * decomposition turns some characters (LATIN SMALL LIGATURE FFI, TRADE MARK SIGN, FULLWIDTH SOLIDUS) into a series
776     * of characters ('f'+'f'+'i', 'T'+'M', '/') that cannot be re-composed into the original, and therefore loses
777     * information.
778     */
779    protected String normalize(String path) {
780        return Normalizer.normalize(path, Normalizer.Form.NFC);
781    }
782
783    /* Does not apply to properties for now (no use case). */
784    @Override
785    public Node getNodeByPath(String path, Node node) {
786        // TODO optimize this to use a dedicated path-based table
787        checkLive();
788        if (path == null) {
789            throw new IllegalArgumentException("Illegal null path");
790        }
791        path = normalize(path);
792        int i;
793        if (path.startsWith("/")) {
794            node = getRootNode();
795            if (path.equals("/")) {
796                return node;
797            }
798            i = 1;
799        } else {
800            if (node == null) {
801                throw new IllegalArgumentException("Illegal relative path with null node: " + path);
802            }
803            i = 0;
804        }
805        String[] names = path.split("/", -1);
806        for (; i < names.length; i++) {
807            String name = names[i];
808            if (name.length() == 0) {
809                throw new IllegalArgumentException("Illegal path with empty component: " + path);
810            }
811            node = getChildNode(node, name, false);
812            if (node == null) {
813                return null;
814            }
815        }
816        return node;
817    }
818
819    @Override
820    public boolean addMixinType(Node node, String mixin) {
821        if (model.getMixinPropertyInfos(mixin) == null) {
822            throw new IllegalArgumentException("No such mixin: " + mixin);
823        }
824        if (model.getDocumentTypeFacets(node.getPrimaryType()).contains(mixin)) {
825            return false; // already present in type
826        }
827        List<String> list = new ArrayList<String>(Arrays.asList(node.getMixinTypes()));
828        if (list.contains(mixin)) {
829            return false; // already present in node
830        }
831        list.add(mixin);
832        String[] mixins = list.toArray(new String[list.size()]);
833        node.hierFragment.put(Model.MAIN_MIXIN_TYPES_KEY, mixins);
834        // immediately create child nodes (for complex properties) in order
835        // to avoid concurrency issue later on
836        Map<String, String> childrenTypes = model.getMixinComplexChildren(mixin);
837        for (Entry<String, String> es : childrenTypes.entrySet()) {
838            String childName = es.getKey();
839            String childType = es.getValue();
840            addChildNode(node, childName, null, childType, true);
841        }
842        return true;
843    }
844
845    @Override
846    public boolean removeMixinType(Node node, String mixin) {
847        List<String> list = new ArrayList<String>(Arrays.asList(node.getMixinTypes()));
848        if (!list.remove(mixin)) {
849            return false; // not present in node
850        }
851        String[] mixins = list.toArray(new String[list.size()]);
852        if (mixins.length == 0) {
853            mixins = null;
854        }
855        node.hierFragment.put(Model.MAIN_MIXIN_TYPES_KEY, mixins);
856        // remove child nodes
857        Map<String, String> childrenTypes = model.getMixinComplexChildren(mixin);
858        for (String childName : childrenTypes.keySet()) {
859            Node child = getChildNode(node, childName, true);
860            removePropertyNode(child);
861        }
862        node.clearCache();
863        return true;
864    }
865
866    @Override
867    public Node addChildNode(Node parent, String name, Long pos, String typeName, boolean complexProp) {
868        if (pos == null && !complexProp && parent != null) {
869            pos = context.getNextPos(parent.getId(), complexProp);
870        }
871        return addChildNode(null, parent, name, pos, typeName, complexProp);
872    }
873
874    @Override
875    public Node addChildNode(Serializable id, Node parent, String name, Long pos, String typeName,
876            boolean complexProp) {
877        checkLive();
878        if (name == null) {
879            throw new IllegalArgumentException("Illegal null name");
880        }
881        name = normalize(name);
882        if (name.contains("/") || name.equals(".") || name.equals("..")) {
883            throw new IllegalArgumentException("Illegal name: " + name);
884        }
885        if (!model.isType(typeName)) {
886            throw new IllegalArgumentException("Unknown type: " + typeName);
887        }
888        id = generateNewId(id);
889        Serializable parentId = parent == null ? null : parent.hierFragment.getId();
890        Node node = addNode(id, parentId, name, pos, typeName, complexProp);
891        // immediately create child nodes (for complex properties) in order
892        // to avoid concurrency issue later on
893        Map<String, String> childrenTypes = model.getTypeComplexChildren(typeName);
894        for (Entry<String, String> es : childrenTypes.entrySet()) {
895            String childName = es.getKey();
896            String childType = es.getValue();
897            addChildNode(node, childName, null, childType, true);
898        }
899        return node;
900    }
901
902    protected Node addNode(Serializable id, Serializable parentId, String name, Long pos, String typeName,
903            boolean complexProp) {
904        requireReadAclsUpdate();
905        // main info
906        Row hierRow = new Row(Model.HIER_TABLE_NAME, id);
907        hierRow.putNew(Model.HIER_PARENT_KEY, parentId);
908        hierRow.putNew(Model.HIER_CHILD_NAME_KEY, name);
909        hierRow.putNew(Model.HIER_CHILD_POS_KEY, pos);
910        hierRow.putNew(Model.MAIN_PRIMARY_TYPE_KEY, typeName);
911        hierRow.putNew(Model.HIER_CHILD_ISPROPERTY_KEY, Boolean.valueOf(complexProp));
912        SimpleFragment hierFragment = context.createHierarchyFragment(hierRow);
913        FragmentGroup fragmentGroup = new FragmentGroup(hierFragment, new FragmentsMap());
914        return new Node(context, fragmentGroup, context.getPath(hierFragment));
915    }
916
917    @Override
918    public Node addProxy(Serializable targetId, Serializable versionableId, Node parent, String name, Long pos) {
919        if (!repository.getRepositoryDescriptor().getProxiesEnabled()) {
920            throw new NuxeoException("Proxies are disabled by configuration");
921        }
922        Node proxy = addChildNode(parent, name, pos, Model.PROXY_TYPE, false);
923        proxy.setSimpleProperty(Model.PROXY_TARGET_PROP, targetId);
924        proxy.setSimpleProperty(Model.PROXY_VERSIONABLE_PROP, versionableId);
925        SimpleFragment proxyFragment = (SimpleFragment) proxy.fragments.get(Model.PROXY_TABLE_NAME);
926        context.createdProxyFragment(proxyFragment);
927        return proxy;
928    }
929
930    @Override
931    public void setProxyTarget(Node proxy, Serializable targetId) {
932        if (!repository.getRepositoryDescriptor().getProxiesEnabled()) {
933            throw new NuxeoException("Proxies are disabled by configuration");
934        }
935        SimpleProperty prop = proxy.getSimpleProperty(Model.PROXY_TARGET_PROP);
936        Serializable oldTargetId = prop.getValue();
937        if (!oldTargetId.equals(targetId)) {
938            SimpleFragment proxyFragment = (SimpleFragment) proxy.fragments.get(Model.PROXY_TABLE_NAME);
939            context.removedProxyTarget(proxyFragment);
940            proxy.setSimpleProperty(Model.PROXY_TARGET_PROP, targetId);
941            context.addedProxyTarget(proxyFragment);
942        }
943    }
944
945    @Override
946    public boolean hasChildNode(Node parent, String name, boolean complexProp) {
947        checkLive();
948        // TODO could optimize further by not fetching the fragment at all
949        SimpleFragment fragment = context.getChildHierByName(parent.getId(), normalize(name), complexProp);
950        return fragment != null;
951    }
952
953    @Override
954    public Node getChildNode(Node parent, String name, boolean complexProp) {
955        checkLive();
956        if (name == null || name.contains("/") || name.equals(".") || name.equals("..")) {
957            throw new IllegalArgumentException("Illegal name: " + name);
958        }
959        SimpleFragment fragment = context.getChildHierByName(parent.getId(), name, complexProp);
960        return fragment == null ? null : getNodeById(fragment.getId());
961    }
962
963    // TODO optimize with dedicated backend call
964    @Override
965    public boolean hasChildren(Node parent, boolean complexProp) {
966        checkLive();
967        List<SimpleFragment> children = context.getChildren(parent.getId(), null, complexProp);
968        return children.size() > 0;
969    }
970
971    @Override
972    public List<Node> getChildren(Node parent, String name, boolean complexProp) {
973        checkLive();
974        List<SimpleFragment> fragments = context.getChildren(parent.getId(), name, complexProp);
975        List<Node> nodes = new ArrayList<Node>(fragments.size());
976        for (SimpleFragment fragment : fragments) {
977            Node node = getNodeById(fragment.getId());
978            if (node == null) {
979                // cannot happen
980                log.error("Child node cannot be created: " + fragment.getId());
981                continue;
982            }
983            nodes.add(node);
984        }
985        return nodes;
986    }
987
988    @Override
989    public void orderBefore(Node parent, Node source, Node dest) {
990        checkLive();
991        context.orderBefore(parent.getId(), source.getId(), dest == null ? null : dest.getId());
992    }
993
994    @Override
995    public Node move(Node source, Node parent, String name) {
996        checkLive();
997        if (!parent.getId().equals(source.getParentId())) {
998            flush(); // needed when doing many moves for circular stuff
999        }
1000        context.move(source, parent.getId(), name);
1001        requireReadAclsUpdate();
1002        return source;
1003    }
1004
1005    @Override
1006    public Node copy(Node source, Node parent, String name) {
1007        checkLive();
1008        flush();
1009        Serializable id = context.copy(source, parent.getId(), name);
1010        requireReadAclsUpdate();
1011        return getNodeById(id);
1012    }
1013
1014    @Override
1015    public void removeNode(Node node) {
1016        checkLive();
1017        flush();
1018        // remove the lock using the lock manager
1019        // TODO children locks?
1020        getLockManager().removeLock(model.idToString(node.getId()), null);
1021        context.removeNode(node.getHierFragment());
1022    }
1023
    /** Removes a complex-property node (no lock handling, no flush needed). */
    @Override
    public void removePropertyNode(Node node) {
        checkLive();
        // no flush needed
        context.removePropertyNode(node.getHierFragment());
    }
1030
1031    @Override
1032    public Node checkIn(Node node, String label, String checkinComment) {
1033        checkLive();
1034        flush();
1035        Serializable id = context.checkIn(node, label, checkinComment);
1036        requireReadAclsUpdate();
1037        // save to reflect changes immediately in database
1038        flush();
1039        return getNodeById(id);
1040    }
1041
    /** Checks out a document so it can be modified again. */
    @Override
    public void checkOut(Node node) {
        checkLive();
        context.checkOut(node);
        requireReadAclsUpdate();
    }
1048
    /** Restores a document to the state of one of its versions. */
    @Override
    public void restore(Node node, Node version) {
        checkLive();
        // save done inside method
        context.restoreVersion(node, version);
        requireReadAclsUpdate();
    }
1056
1057    @Override
1058    public Node getVersionByLabel(Serializable versionSeriesId, String label) {
1059        if (label == null) {
1060            return null;
1061        }
1062        List<Node> versions = getVersions(versionSeriesId);
1063        for (Node node : versions) {
1064            String l = (String) node.getSimpleProperty(Model.VERSION_LABEL_PROP).getValue();
1065            if (label.equals(l)) {
1066                return node;
1067            }
1068        }
1069        return null;
1070    }
1071
1072    @Override
1073    public Node getLastVersion(Serializable versionSeriesId) {
1074        checkLive();
1075        List<Serializable> ids = context.getVersionIds(versionSeriesId);
1076        return ids.isEmpty() ? null : getNodeById(ids.get(ids.size() - 1));
1077    }
1078
1079    @Override
1080    public List<Node> getVersions(Serializable versionSeriesId) {
1081        checkLive();
1082        List<Serializable> ids = context.getVersionIds(versionSeriesId);
1083        List<Node> nodes = new ArrayList<Node>(ids.size());
1084        for (Serializable id : ids) {
1085            nodes.add(getNodeById(id));
1086        }
1087        return nodes;
1088    }
1089
    /**
     * Gets the proxies pointing to a document (or to its version series when the
     * document is not itself a version), optionally filtered by parent folder.
     * Returns an empty list when proxies are disabled by configuration.
     */
    @Override
    public List<Node> getProxies(Node document, Node parent) {
        checkLive();
        if (!repository.getRepositoryDescriptor().getProxiesEnabled()) {
            return Collections.emptyList();
        }

        // find the proxy ids: proxies on this exact version, or all proxies
        // of the whole version series otherwise
        List<Serializable> ids;
        if (document.isVersion()) {
            ids = context.getTargetProxyIds(document.getId());
        } else {
            Serializable versionSeriesId;
            if (document.isProxy()) {
                versionSeriesId = document.getSimpleProperty(Model.PROXY_VERSIONABLE_PROP).getValue();
            } else {
                versionSeriesId = document.getId();
            }
            ids = context.getSeriesProxyIds(versionSeriesId);
        }

        List<Node> nodes = new LinkedList<Node>();
        for (Serializable id : ids) {
            Node node = getNodeById(id);
            // NOTE(review): this condition is always true, so null nodes (deleted
            // proxies whose selection wasn't updated) are added on purpose per the
            // XXX marker — confirm callers tolerate null elements; also note the
            // parent filter below would NPE on such a null element
            if (node != null || Boolean.TRUE.booleanValue()) { // XXX
                // null if deleted, which means selection wasn't correctly
                // updated
                nodes.add(node);
            }
        }

        if (parent != null) {
            // filter by parent
            Serializable parentId = parent.getId();
            for (Iterator<Node> it = nodes.iterator(); it.hasNext();) {
                Node node = it.next();
                if (!parentId.equals(node.getParentId())) {
                    it.remove();
                }
            }
        }

        return nodes;
    }
1133
1134    /**
1135     * Fetches the hierarchy fragment for the given rows and all their ancestors.
1136     *
1137     * @param ids the fragment ids
1138     */
    protected List<Fragment> getHierarchyAndAncestors(Collection<Serializable> ids) {
        // gather the requested ids plus all their ancestor ids
        Set<Serializable> allIds = mapper.getAncestorsIds(ids);
        allIds.addAll(ids);
        // then fetch all the corresponding hierarchy rows in one batch
        List<RowId> rowIds = new ArrayList<RowId>(allIds.size());
        for (Serializable id : allIds) {
            rowIds.add(new RowId(Model.HIER_TABLE_NAME, id));
        }
        return context.getMulti(rowIds, true);
    }
1148
1149    @Override
1150    public PartialList<Serializable> query(String query, QueryFilter queryFilter, boolean countTotal) {
1151        final Timer.Context timerContext = queryTimer.time();
1152        try {
1153            return mapper.query(query, NXQL.NXQL, queryFilter, countTotal);
1154        } finally {
1155            timerContext.stop();
1156        }
1157    }
1158
    /**
     * Runs a query of the given type and returns the matching ids; timed, with
     * slow-query logging above the configured threshold.
     */
    @Override
    public PartialList<Serializable> query(String query, String queryType, QueryFilter queryFilter, long countUpTo) {
        final Timer.Context timerContext = queryTimer.time();
        try {
            return mapper.query(query, queryType, queryFilter, countUpTo);
        } finally {
            long duration = timerContext.stop();
            // log queries slower than the threshold (negative threshold disables)
            if ((LOG_MIN_DURATION_NS >= 0) && (duration > LOG_MIN_DURATION_NS)) {
                String msg = String.format("duration_ms:\t%.2f\t%s %s\tquery\t%s", duration / 1000000.0, queryFilter,
                        countUpToAsString(countUpTo), query);
                if (log.isTraceEnabled()) {
                    // in trace mode a synthetic stack trace locates the slow caller;
                    // the message itself is still logged at info level
                    log.info(msg, new Throwable("Slow query stack trace"));
                } else {
                    log.info(msg);
                }
            }
        }
    }
1177
1178    private String countUpToAsString(long countUpTo) {
1179        if (countUpTo > 0) {
1180            return String.format("count total results up to %d", countUpTo);
1181        }
1182        return countUpTo == -1 ? "count total results UNLIMITED" : "";
1183    }
1184
    /**
     * Runs a query and returns an iterable over the projected values; timed, with
     * slow-query logging above the configured threshold.
     */
    @Override
    public IterableQueryResult queryAndFetch(String query, String queryType, QueryFilter queryFilter,
            Object... params) {
        final Timer.Context timerContext = queryTimer.time();
        try {
            return mapper.queryAndFetch(query, queryType, queryFilter, params);
        } finally {
            long duration = timerContext.stop();
            // log queries slower than the threshold (negative threshold disables)
            if ((LOG_MIN_DURATION_NS >= 0) && (duration > LOG_MIN_DURATION_NS)) {
                String msg = String.format("duration_ms:\t%.2f\t%s\tqueryAndFetch\t%s", duration / 1000000.0,
                        queryFilter, query);
                if (log.isTraceEnabled()) {
                    // synthetic stack trace locates the slow caller; still info level
                    log.info(msg, new Throwable("Slow query stack trace"));
                } else {
                    log.info(msg);
                }
            }
        }
    }
1204
    /** Gets the lock manager, which is shared at the repository level. */
    @Override
    public LockManager getLockManager() {
        return repository.getLockManager();
    }
1209
    /** Flags that the read ACLs must be recomputed before the next relevant use. */
    @Override
    public void requireReadAclsUpdate() {
        readAclsChanged = true;
    }
1214
    /** Updates the read ACLs through the mapper; timed. */
    @Override
    public void updateReadAcls() {
        final Timer.Context timerContext = aclrUpdateTimer.time();
        try {
            mapper.updateReadAcls();
            // cleared only when the update succeeded; on failure the flag stays
            // set so a later call retries the update
            readAclsChanged = false;
        } finally {
            timerContext.stop();
        }
    }
1225
    /** Rebuilds the read ACLs from scratch through the mapper. */
    @Override
    public void rebuildReadAcls() {
        mapper.rebuildReadAcls();
        readAclsChanged = false;
    }
1231
1232    private void computeRootNode() {
1233        String repositoryId = repository.getName();
1234        Serializable rootId = mapper.getRootId(repositoryId);
1235        if (rootId == null && COMPAT_REPOSITORY_NAME) {
1236            // compat, old repositories had fixed id "default"
1237            rootId = mapper.getRootId("default");
1238        }
1239        if (rootId == null) {
1240            log.debug("Creating root");
1241            rootNode = addRootNode();
1242            addRootACP();
1243            save();
1244            // record information about the root id
1245            mapper.setRootId(repositoryId, rootNode.getId());
1246        } else {
1247            rootNode = getNodeById(rootId, false);
1248        }
1249    }
1250
    // TODO factor with addChildNode
    /** Creates the root node: empty name, no parent, root type. */
    private Node addRootNode() {
        Serializable id = generateNewId(null);
        return addNode(id, null, "", null, Model.ROOT_TYPE, false);
    }
1256
1257    private void addRootACP() {
1258        ACLRow[] aclrows = new ACLRow[3];
1259        // TODO put groups in their proper place. like that now for consistency.
1260        aclrows[0] = new ACLRow(0, ACL.LOCAL_ACL, true, SecurityConstants.EVERYTHING, SecurityConstants.ADMINISTRATORS,
1261                null);
1262        aclrows[1] = new ACLRow(1, ACL.LOCAL_ACL, true, SecurityConstants.EVERYTHING, SecurityConstants.ADMINISTRATOR,
1263                null);
1264        aclrows[2] = new ACLRow(2, ACL.LOCAL_ACL, true, SecurityConstants.READ, SecurityConstants.MEMBERS, null);
1265        rootNode.setCollectionProperty(Model.ACL_PROP, aclrows);
1266        requireReadAclsUpdate();
1267    }
1268
1269    // public Node newNodeInstance() needed ?
1270
1271    public void checkPermission(String absPath, String actions) {
1272        checkLive();
1273        // TODO Auto-generated method stub
1274        throw new RuntimeException("Not implemented");
1275    }
1276
1277    public boolean hasPendingChanges() {
1278        checkLive();
1279        // TODO Auto-generated method stub
1280        throw new RuntimeException("Not implemented");
1281    }
1282
    /** Marks all binaries referenced by documents, for garbage collection purposes. */
    public void markReferencedBinaries() {
        checkLive();
        mapper.markReferencedBinaries();
    }
1287
    /**
     * Physically deletes up to {@code max} soft-deleted rows older than
     * {@code beforeTime}; a no-op (returns 0) unless soft delete is enabled.
     *
     * @return the number of rows cleaned up
     */
    public int cleanupDeletedDocuments(int max, Calendar beforeTime) {
        checkLive();
        if (!repository.getRepositoryDescriptor().getSoftDeleteEnabled()) {
            return 0;
        }
        return mapper.cleanupDeletedRows(max, beforeTime);
    }
1295
1296    /*
1297     * ----- XAResource -----
1298     */
1299
1300    @Override
1301    public boolean isSameRM(XAResource xaresource) {
1302        return xaresource == this;
1303    }
1304
    /**
     * Starts work on an XA branch. For a brand-new transaction (TMNOFLAGS, not a
     * join/resume), invalidations received from other sessions are processed first.
     */
    @Override
    public void start(Xid xid, int flags) throws XAException {
        if (flags == TMNOFLAGS) {
            try {
                processReceivedInvalidations();
            } catch (NuxeoException e) {
                log.error("Could not start transaction", e);
                // XAException has no cause-taking constructor; initCause keeps the chain
                throw (XAException) new XAException(XAException.XAER_RMERR).initCause(e);
            }
        }
        mapper.start(xid, flags);
        inTransaction = true;
        checkThreadStart();
    }
1319
    /**
     * Ends work on an XA branch: flushes pending session changes first (unless the
     * branch already failed), then delegates to the mapper. On any failure the
     * branch is ended with TMFAIL so the transaction manager rolls back.
     */
    @Override
    public void end(Xid xid, int flags) throws XAException {
        boolean failed = true;
        try {
            if (flags != TMFAIL) {
                try {
                    flush();
                } catch (ConcurrentUpdateException e) {
                    TransactionHelper.noteSuppressedException(e);
                    log.debug("Exception during transaction commit", e);
                    // set rollback only manually instead of throwing, this avoids
                    // a spurious log in Geronimo TransactionImpl and has the same effect
                    TransactionHelper.setTransactionRollbackOnly();
                    // note: failed is still true here, so the finally block below
                    // ends the branch with TMFAIL
                    return;
                } catch (NuxeoException e) {
                    log.error("Exception during transaction commit", e);
                    throw (XAException) new XAException(XAException.XAER_RMERR).initCause(e);
                }
            }
            failed = false;
            mapper.end(xid, flags);
        } finally {
            if (failed) {
                mapper.end(xid, TMFAIL);
                // rollback done by tx manager
            }
        }
    }
1348
    /**
     * Prepares the XA branch via the mapper. On XA_RDONLY the post-commit work is
     * done immediately, since commit() will not be called by the transaction manager.
     */
    @Override
    public int prepare(Xid xid) throws XAException {
        int res = mapper.prepare(xid);
        if (res == XA_RDONLY) {
            // Read-only optimization, commit() won't be called by the TM.
            // It's important to nevertheless send invalidations because
            // Oracle, in tightly-coupled transaction mode, can return
            // this status even when some changes were actually made
            // (they just will be committed by another resource).
            // See NXP-7943
            commitDone();
        }
        return res;
    }
1363
    /** Commits the XA branch via the mapper, then always runs the post-commit work. */
    @Override
    public void commit(Xid xid, boolean onePhase) throws XAException {
        try {
            mapper.commit(xid, onePhase);
        } finally {
            // reset state and send invalidations even if the commit itself failed
            commitDone();
        }
    }
1372
    /**
     * Post-commit work: marks the transaction finished, sends invalidations to
     * other sessions, and runs the per-thread end check.
     *
     * @throws XAException if sending invalidations fails
     */
    protected void commitDone() throws XAException {
        inTransaction = false;
        try {
            try {
                sendInvalidationsToOthers();
            } finally {
                // always run the thread-end check, even if sending invalidations failed
                checkThreadEnd();
            }
        } catch (NuxeoException e) {
            log.error("Could not send invalidations", e);
            throw (XAException) new XAException(XAException.XAER_RMERR).initCause(e);
        }
    }
1386
    /**
     * Rolls back the XA branch via the mapper, then rolls back the local session
     * state; the transaction is marked finished in all cases.
     */
    @Override
    public void rollback(Xid xid) throws XAException {
        try {
            try {
                mapper.rollback(xid);
            } finally {
                rollback(); // local session rollback, even if the mapper rollback failed
            }
        } finally {
            inTransaction = false;
            // no invalidations to send
            checkThreadEnd();
        }
    }
1401
    /** Delegates {@link XAResource#forget} to the mapper. */
    @Override
    public void forget(Xid xid) throws XAException {
        mapper.forget(xid);
    }
1406
    /** Delegates {@link XAResource#recover} to the mapper. */
    @Override
    public Xid[] recover(int flag) throws XAException {
        return mapper.recover(flag);
    }
1411
    /** Delegates the transaction timeout setting to the mapper. */
    @Override
    public boolean setTransactionTimeout(int seconds) throws XAException {
        return mapper.setTransactionTimeout(seconds);
    }
1416
    /** Delegates the transaction timeout query to the mapper. */
    @Override
    public int getTransactionTimeout() throws XAException {
        return mapper.getTransactionTimeout();
    }
1421
    /** Gets the session cache size, as reported by the persistence context. */
    public long getCacheSize() {
        return context.getCacheSize();
    }
1425
    /** Gets the mapper cache size, as reported by the persistence context. */
    public long getCacheMapperSize() {
        return context.getCacheMapperSize();
    }
1429
    /** Gets the pristine-fragments cache size, as reported by the persistence context. */
    public long getCachePristineSize() {
        return context.getCachePristineSize();
    }
1433
    /** Gets the selections cache size, as reported by the persistence context. */
    public long getCacheSelectionSize() {
        return context.getCacheSelectionSize();
    }
1437
1438    @Override
1439    public Map<String, String> getBinaryFulltext(Serializable id) {
1440        if (repository.getRepositoryDescriptor().getFulltextDisabled()) {
1441            return null;
1442        }
1443        RowId rowId = new RowId(Model.FULLTEXT_TABLE_NAME, id);
1444        return mapper.getBinaryFulltext(rowId);
1445    }
1446
1447}