001/*
002 * Copyright (c) 2006-2013 Nuxeo SA (http://nuxeo.com/) and others.
003 *
004 * All rights reserved. This program and the accompanying materials
005 * are made available under the terms of the Eclipse Public License v1.0
006 * which accompanies this distribution, and is available at
007 * http://www.eclipse.org/legal/epl-v10.html
008 *
009 * Contributors:
010 *     Florent Guillaume
011 */
012package org.nuxeo.ecm.core.storage.sql;
013
014import java.io.Serializable;
015import java.text.Normalizer;
016import java.util.ArrayList;
017import java.util.Arrays;
018import java.util.Calendar;
019import java.util.Collection;
020import java.util.Collections;
021import java.util.HashMap;
022import java.util.HashSet;
023import java.util.Iterator;
024import java.util.LinkedList;
025import java.util.List;
026import java.util.Map;
027import java.util.Map.Entry;
028import java.util.Set;
029
030import javax.resource.ResourceException;
031import javax.resource.cci.ConnectionMetaData;
032import javax.resource.cci.Interaction;
033import javax.resource.cci.LocalTransaction;
034import javax.resource.cci.ResultSetInfo;
035import javax.transaction.xa.XAException;
036import javax.transaction.xa.XAResource;
037import javax.transaction.xa.Xid;
038
039import org.apache.commons.lang.StringUtils;
040import org.apache.commons.logging.Log;
041import org.apache.commons.logging.LogFactory;
042import org.nuxeo.ecm.core.api.ConcurrentUpdateException;
043import org.nuxeo.ecm.core.api.IterableQueryResult;
044import org.nuxeo.ecm.core.api.NuxeoException;
045import org.nuxeo.ecm.core.api.PartialList;
046import org.nuxeo.ecm.core.api.repository.RepositoryManager;
047import org.nuxeo.ecm.core.api.security.ACL;
048import org.nuxeo.ecm.core.api.security.SecurityConstants;
049import org.nuxeo.ecm.core.model.LockManager;
050import org.nuxeo.ecm.core.query.QueryFilter;
051import org.nuxeo.ecm.core.query.sql.NXQL;
052import org.nuxeo.ecm.core.schema.DocumentType;
053import org.nuxeo.ecm.core.schema.SchemaManager;
054import org.nuxeo.ecm.core.storage.FulltextParser;
055import org.nuxeo.ecm.core.storage.FulltextUpdaterWork;
056import org.nuxeo.ecm.core.storage.FulltextUpdaterWork.IndexAndText;
057import org.nuxeo.ecm.core.storage.sql.PersistenceContext.PathAndId;
058import org.nuxeo.ecm.core.storage.sql.RowMapper.RowBatch;
059import org.nuxeo.ecm.core.storage.sql.coremodel.SQLFulltextExtractorWork;
060import org.nuxeo.ecm.core.work.api.Work;
061import org.nuxeo.ecm.core.work.api.WorkManager;
062import org.nuxeo.ecm.core.work.api.WorkManager.Scheduling;
063import org.nuxeo.runtime.api.Framework;
064import org.nuxeo.runtime.metrics.MetricsService;
065import org.nuxeo.runtime.transaction.TransactionHelper;
066
067import com.codahale.metrics.MetricRegistry;
068import com.codahale.metrics.SharedMetricRegistries;
069import com.codahale.metrics.Timer;
070
071/**
072 * The session is the main high level access point to data from the underlying database.
073 */
074public class SessionImpl implements Session, XAResource {
075
    private static final Log log = LogFactory.getLog(SessionImpl.class);

    /**
     * Set this system property to false if you don't want repositories to be looked up under the compatibility name
     * "default" in the "repositories" table.
     * <p>
     * Only do this if you start from an empty database, or if you have migrated the "repositories" table by hand, or if
     * you need to create a new repository in a database already containing a "default" repository (table sharing, not
     * recommended).
     */
    public static final String COMPAT_REPOSITORY_NAME_KEY = "org.nuxeo.vcs.repository.name.default.compat";

    // defaults to true unless the system property above is explicitly set to "false"
    private static final boolean COMPAT_REPOSITORY_NAME = Boolean.parseBoolean(Framework.getProperty(
            COMPAT_REPOSITORY_NAME_KEY, "true"));

    /** The repository this session belongs to. */
    protected final RepositoryImpl repository;

    /** Low-level row mapper; closing it also closes the underlying connection (see closeSession). */
    private final Mapper mapper;

    /** The storage model (types, tables, property paths). */
    private final Model model;

    /** Parser used to extract fulltext from string properties at save() time. */
    protected final FulltextParser fulltextParser;

    // public because used by unit tests
    public final PersistenceContext context;

    // volatile: checked by checkLive(), which may race with close() from another thread
    private volatile boolean live;

    // true while inside an XA transaction; checked by save() and clearCaches()
    private boolean inTransaction;

    private Node rootNode;

    // id of the thread currently owning this session (0 when unowned); see checkThread()
    private long threadId;

    private String threadName;

    // owner stack trace, captured only when debug logging is enabled (see checkThreadStart)
    private Throwable threadStack;

    // set when ACLs changed; makes flush() call updateReadAcls()
    private boolean readAclsChanged;

    // @since 5.7
    protected final MetricRegistry registry = SharedMetricRegistries.getOrCreate(MetricsService.class.getName());

    private final Timer saveTimer;

    private final Timer queryTimer;

    private final Timer aclrUpdateTimer;
124
125    private static final java.lang.String LOG_MIN_DURATION_KEY = "org.nuxeo.vcs.query.log_min_duration_ms";
126
127    private static final long LOG_MIN_DURATION_NS = Long.parseLong(Framework.getProperty(LOG_MIN_DURATION_KEY, "-1")) * 1000000;
128
    /**
     * Creates a live session on the given repository.
     *
     * @param repository the repository this session belongs to
     * @param model the storage model
     * @param mapper the low-level mapper (owns the connection)
     * @throws NuxeoException if the configured fulltext parser class cannot be instantiated
     */
    public SessionImpl(RepositoryImpl repository, Model model, Mapper mapper) {
        this.repository = repository;
        this.mapper = mapper;
        this.model = model;
        context = new PersistenceContext(model, mapper, this);
        live = true;
        readAclsChanged = false;

        try {
            // the parser class is configured on the repository; one instance per session
            fulltextParser = repository.fulltextParserClass.newInstance();
        } catch (ReflectiveOperationException e) {
            throw new NuxeoException(e);
        }
        saveTimer = registry.timer(MetricRegistry.name("nuxeo", "repositories", repository.getName(), "saves"));
        queryTimer = registry.timer(MetricRegistry.name("nuxeo", "repositories", repository.getName(), "queries"));
        aclrUpdateTimer = registry.timer(MetricRegistry.name("nuxeo", "repositories", repository.getName(),
                "aclr-updates"));

        computeRootNode();
    }
149
150    public void checkLive() {
151        if (!live) {
152            throw new IllegalStateException("Session is not live");
153        }
154        checkThread();
155    }
156
    /**
     * Returns the low-level {@link Mapper} used by this session.
     * <p>
     * Called by NetServlet when forwarding remote NetMapper calls.
     */
    @Override
    public Mapper getMapper() {
        return mapper;
    }

    /**
     * Gets the XAResource. Called by the ManagedConnectionImpl, which actually wraps it in a connection-aware
     * implementation.
     *
     * @return this session, which implements {@link XAResource} itself
     */
    public XAResource getXAResource() {
        return this;
    }
170
171    /**
172     * Clears all the caches. Called by RepositoryManagement.
173     */
174    protected int clearCaches() {
175        if (inTransaction) {
176            // avoid potential multi-threaded access to active session
177            return 0;
178        }
179        checkThreadEnd();
180        return context.clearCaches();
181    }
182
    /** Returns the persistence context backing this session. */
    protected PersistenceContext getContext() {
        return context;
    }

    /**
     * Rolls back the session state by dropping all context caches.
     * <p>
     * NOTE(review): only in-memory caches are cleared here; the database-level rollback is presumably done by the XA
     * machinery — confirm.
     */
    protected void rollback() {
        context.clearCaches();
    }
190
191    protected void checkThread() {
192        if (threadId == 0) {
193            return;
194        }
195        long currentThreadId = Thread.currentThread().getId();
196        if (threadId == currentThreadId) {
197            return;
198        }
199        String currentThreadName = Thread.currentThread().getName();
200        String msg = String.format("Concurrency Error: Session was started in thread %s (%s)"
201                + " but is being used in thread %s (%s)", threadId, threadName, currentThreadId, currentThreadName);
202        throw new IllegalStateException(msg, threadStack);
203    }
204
205    protected void checkThreadStart() {
206        threadId = Thread.currentThread().getId();
207        threadName = Thread.currentThread().getName();
208        if (log.isDebugEnabled()) {
209            threadStack = new Throwable("owner stack trace");
210        }
211    }
212
213    protected void checkThreadEnd() {
214        threadId = 0;
215        threadName = null;
216        threadStack = null;
217    }
218
    /**
     * Generates a new id, or uses a pre-generated one (import).
     *
     * @param id a pre-generated id to register, or {@code null} to generate a fresh one
     * @return the id to use
     */
    protected Serializable generateNewId(Serializable id) {
        return context.generateNewId(id);
    }

    /** Tells whether the given id is one newly generated by this session's context. */
    protected boolean isIdNew(Serializable id) {
        return context.isIdNew(id);
    }
229
    /*
     * ----- javax.resource.cci.Connection -----
     */

    /**
     * Closes this session and unregisters it from its repository.
     *
     * @throws ResourceException if the session is not live or closing fails
     */
    @Override
    public void close() throws ResourceException {
        try {
            checkLive();
            closeSession();
            repository.closeSession(this);
        } catch (Exception cause) {
            throw new ResourceException(cause);
        }
    }

    /** Marks the session dead, clears the context caches and closes the mapper (and thus the connection). */
    protected void closeSession() {
        live = false;
        context.clearCaches();
        // close the mapper and therefore the connection
        mapper.close();
        // don't clean the caches, we keep the pristine cache around
        // TODO this is getting destroyed, we can clean everything
    }
253
    /** Unsupported CCI operation for this connection implementation. */
    @Override
    public Interaction createInteraction() throws ResourceException {
        throw new UnsupportedOperationException();
    }

    /** Unsupported CCI operation for this connection implementation. */
    @Override
    public LocalTransaction getLocalTransaction() throws ResourceException {
        throw new UnsupportedOperationException();
    }

    /** Unsupported CCI operation for this connection implementation. */
    @Override
    public ConnectionMetaData getMetaData() throws ResourceException {
        throw new UnsupportedOperationException();
    }

    /** Unsupported CCI operation for this connection implementation. */
    @Override
    public ResultSetInfo getResultSetInfo() throws ResourceException {
        throw new UnsupportedOperationException();
    }
273
    /*
     * ----- Session -----
     */

    /** Tells whether this session is still usable (not closed). */
    @Override
    public boolean isLive() {
        return live;
    }

    @Override
    public boolean isStateSharedByAllThreadSessions() {
        // only the JCA handle returns true
        return false;
    }

    /** Returns the name of the repository this session is attached to. */
    @Override
    public String getRepositoryName() {
        return repository.getName();
    }

    /** Returns the internal storage model. */
    @Override
    public Model getModel() {
        return model;
    }

    /** Returns the repository root node (set up by computeRootNode() at construction time). */
    @Override
    public Node getRootNode() {
        checkLive();
        return rootNode;
    }
304
305    @Override
306    public void save() {
307        final Timer.Context timerContext = saveTimer.time();
308        try {
309            checkLive();
310            flush();
311            if (!inTransaction) {
312                sendInvalidationsToOthers();
313                // as we don't have a way to know when the next
314                // non-transactional
315                // statement will start, process invalidations immediately
316            }
317            processReceivedInvalidations();
318        } finally {
319            timerContext.stop();
320        }
321    }
322
323    protected void flush() {
324        checkThread();
325        List<Work> works;
326        if (!repository.getRepositoryDescriptor().getFulltextDisabled()) {
327            works = getFulltextWorks();
328        } else {
329            works = Collections.emptyList();
330        }
331        doFlush();
332        if (readAclsChanged) {
333            updateReadAcls();
334        }
335        scheduleWork(works);
336        checkInvalidationsConflict();
337    }
338
339    protected void scheduleWork(List<Work> works) {
340        // do async fulltext indexing only if high-level sessions are available
341        RepositoryManager repositoryManager = Framework.getLocalService(RepositoryManager.class);
342        if (repositoryManager != null && !works.isEmpty()) {
343            WorkManager workManager = Framework.getLocalService(WorkManager.class);
344            for (Work work : works) {
345                // schedule work post-commit
346                // in non-tx mode, this may execute it nearly immediately
347                workManager.schedule(work, Scheduling.IF_NOT_SCHEDULED, true);
348            }
349        }
350    }
351
352    protected void doFlush() {
353        List<Fragment> fragmentsToClearDirty = new ArrayList<>(0);
354        RowBatch batch = context.getSaveBatch(fragmentsToClearDirty);
355        if (!batch.isEmpty()) {
356            log.debug("Saving session");
357            // execute the batch
358            mapper.write(batch);
359            log.debug("End of save");
360            for (Fragment fragment : fragmentsToClearDirty) {
361                fragment.clearDirty();
362            }
363        }
364    }
365
    /** Returns the id of the document containing the given node id (delegates to the context). */
    protected Serializable getContainingDocument(Serializable id) {
        return context.getContainingDocument(id);
    }
369
370    /**
371     * Gets the fulltext updates to do. Called at save() time.
372     *
373     * @return a list of {@link Work} instances to schedule post-commit.
374     */
375    protected List<Work> getFulltextWorks() {
376        Set<Serializable> dirtyStrings = new HashSet<Serializable>();
377        Set<Serializable> dirtyBinaries = new HashSet<Serializable>();
378        context.findDirtyDocuments(dirtyStrings, dirtyBinaries);
379        if (dirtyStrings.isEmpty() && dirtyBinaries.isEmpty()) {
380            return Collections.emptyList();
381        }
382
383        List<Work> works = new LinkedList<Work>();
384        getFulltextSimpleWorks(works, dirtyStrings);
385        getFulltextBinariesWorks(works, dirtyBinaries);
386        return works;
387    }
388
    /**
     * Adds to {@code works} one {@link FulltextUpdaterWork} per dirty document, carrying the text extracted from its
     * string properties for each configured fulltext index.
     *
     * @param works the list of works to append to
     * @param dirtyStrings the ids of documents whose simple-text properties are dirty
     */
    protected void getFulltextSimpleWorks(List<Work> works, Set<Serializable> dirtyStrings) {
        // update simpletext on documents with dirty strings
        for (Serializable docId : dirtyStrings) {
            if (docId == null) {
                // cannot happen, but has been observed :(
                log.error("Got null doc id in fulltext update, cannot happen");
                continue;
            }
            Node document = getNodeById(docId);
            if (document == null) {
                // cannot happen
                continue;
            }
            if (document.isProxy()) {
                // proxies don't have any fulltext attached, it's
                // the target document that carries it
                continue;
            }
            String documentType = document.getPrimaryType();
            String[] mixinTypes = document.getMixinTypes();

            if (!model.getFulltextConfiguration().isFulltextIndexable(documentType)) {
                continue;
            }
            // mark the document as having a fulltext job in progress
            document.getSimpleProperty(Model.FULLTEXT_JOBID_PROP).setValue(model.idToString(document.getId()));
            FulltextFinder fulltextFinder = new FulltextFinder(fulltextParser, document, this);
            List<IndexAndText> indexesAndText = new LinkedList<IndexAndText>();
            for (String indexName : model.getFulltextConfiguration().indexNames) {
                Set<String> paths;
                if (model.getFulltextConfiguration().indexesAllSimple.contains(indexName)) {
                    // index all string fields, minus excluded ones
                    // TODO XXX excluded ones...
                    paths = model.getSimpleTextPropertyPaths(documentType, mixinTypes);
                } else {
                    // index configured fields
                    paths = model.getFulltextConfiguration().propPathsByIndexSimple.get(indexName);
                }
                String text = fulltextFinder.findFulltext(paths);
                indexesAndText.add(new IndexAndText(indexName, text));
            }
            if (!indexesAndText.isEmpty()) {
                Work work = new FulltextUpdaterWork(repository.getName(), model.idToString(docId), true, false,
                        indexesAndText);
                works.add(work);
            }
        }
    }
436
437    protected void getFulltextBinariesWorks(List<Work> works, final Set<Serializable> dirtyBinaries) {
438        if (dirtyBinaries.isEmpty()) {
439            return;
440        }
441
442        // mark indexing in progress, so that future copies (including versions)
443        // will be indexed as well
444        for (Node node : getNodesByIds(new ArrayList<Serializable>(dirtyBinaries))) {
445            if (!model.getFulltextConfiguration().isFulltextIndexable(node.getPrimaryType())) {
446                continue;
447            }
448            node.getSimpleProperty(Model.FULLTEXT_JOBID_PROP).setValue(model.idToString(node.getId()));
449        }
450
451        // FulltextExtractorWork does fulltext extraction using converters
452        // and then schedules a FulltextUpdaterWork to write the results
453        // single-threaded
454        for (Serializable id : dirtyBinaries) {
455            String docId = model.idToString(id);
456            Work work = new SQLFulltextExtractorWork(repository.getName(), docId);
457            works.add(work);
458        }
459    }
460
    /**
     * Finds the fulltext in a document and sends it to a fulltext parser.
     *
     * @since 5.9.5
     */
    protected static class FulltextFinder {

        /** Parser receiving the extracted strings. */
        protected final FulltextParser fulltextParser;

        /** The document to parse; may be null in tests. */
        protected final Node document;

        /** Session used to resolve property info and traverse children. */
        protected final SessionImpl session;

        // primary type of the document, or null when document is null
        protected final String documentType;

        // mixin types of the document, or null when document is null
        protected final String[] mixinTypes;

        /**
         * @param fulltextParser the parser to feed the extracted text to
         * @param document the document to parse; null in tests
         * @param session the session the document belongs to
         */
        public FulltextFinder(FulltextParser fulltextParser, Node document, SessionImpl session) {
            this.fulltextParser = fulltextParser;
            this.document = document;
            this.session = session;
            if (document == null) {
                // null document happens in tests
                documentType = null;
                mixinTypes = null;
            } else {
                documentType = document.getPrimaryType();
                mixinTypes = document.getMixinTypes();
            }
        }

        /**
         * Parses the document for one index.
         *
         * @param paths the property paths configured for the index, or {@code null}
         * @return the extracted strings joined with spaces; empty when {@code paths} is null
         */
        protected String findFulltext(Set<String> paths) {
            if (paths == null) {
                return "";
            }
            List<String> strings = new ArrayList<String>();

            for (String path : paths) {
                ModelProperty pi = session.getModel().getPathPropertyInfo(documentType, mixinTypes, path);
                if (pi == null) {
                    continue; // doc type doesn't have this property
                }
                if (pi.propertyType != PropertyType.STRING && pi.propertyType != PropertyType.ARRAY_STRING) {
                    continue;
                }

                // traversal starts from the document node itself
                List<Node> nodes = new ArrayList<Node>(Collections.singleton(document));

                String[] names = path.split("/");
                for (int i = 0; i < names.length; i++) {
                    String name = names[i];
                    if (i < names.length - 1) {
                        // traverse
                        List<Node> newNodes;
                        if ("*".equals(names[i + 1])) {
                            // traverse complex list (the "*" component is consumed here)
                            i++;
                            newNodes = new ArrayList<Node>();
                            for (Node node : nodes) {
                                newNodes.addAll(session.getChildren(node, name, true));
                            }
                        } else {
                            // traverse child
                            newNodes = new ArrayList<Node>(nodes.size());
                            for (Node node : nodes) {
                                node = session.getChildNode(node, name, true);
                                if (node != null) {
                                    newNodes.add(node);
                                }
                            }
                        }
                        nodes = newNodes;
                    } else {
                        // last path component: get value
                        for (Node node : nodes) {
                            if (pi.propertyType == PropertyType.STRING) {
                                String v = node.getSimpleProperty(name).getString();
                                if (v != null) {
                                    fulltextParser.parse(v, path, strings);
                                }
                            } else { /* ARRAY_STRING */
                                for (Serializable v : node.getCollectionProperty(name).getValue()) {
                                    if (v != null) {
                                        fulltextParser.parse((String) v, path, strings);
                                    }
                                }
                            }
                        }
                    }
                }
            }
            return StringUtils.join(strings, ' ');
        }
    }
557
    /**
     * Post-transaction invalidations notification.
     * <p>
     * Called post-transaction by session commit/rollback or transactionless save.
     */
    protected void sendInvalidationsToOthers() {
        context.sendInvalidationsToOthers();
    }

    /**
     * Processes all invalidations accumulated.
     * <p>
     * Called pre-transaction by start or transactionless save.
     */
    protected void processReceivedInvalidations() {
        context.processReceivedInvalidations();
    }

    /**
     * Post transaction check invalidations processing.
     */
    protected void checkInvalidationsConflict() {
        // repository.receiveClusterInvalidations(this);
        context.checkInvalidationsConflict();
    }
583
584    /*
585     * -------------------------------------------------------------
586     * -------------------------------------------------------------
587     * -------------------------------------------------------------
588     */
589
590    protected Node getNodeById(Serializable id, boolean prefetch) {
591        List<Node> nodes = getNodesByIds(Collections.singletonList(id), prefetch);
592        Node node = nodes.get(0);
593        // ((JDBCMapper) ((CachingMapper)
594        // mapper).mapper).logger.log("getNodeById " + id + " -> " + (node ==
595        // null ? "missing" : "found"));
596        return node;
597    }
598
    /**
     * Gets a node by id, with prefetching.
     *
     * @param id the node id, never {@code null}
     * @return the node, or {@code null} if it does not exist
     * @throws IllegalArgumentException when {@code id} is null
     */
    @Override
    public Node getNodeById(Serializable id) {
        checkLive();
        if (id == null) {
            throw new IllegalArgumentException("Illegal null id");
        }
        return getNodeById(id, true);
    }
607
    /**
     * Gets several nodes by ids, resolving their paths and optionally bulk-fetching the fragments configured for their
     * types, including proxy target information.
     *
     * @param ids the node ids
     * @param prefetch whether to fetch the prefetched tables in bulk
     * @return the nodes, in the same order as {@code ids}, with {@code null} entries for deleted/absent documents
     */
    public List<Node> getNodesByIds(List<Serializable> ids, boolean prefetch) {
        // get hier fragments
        List<RowId> hierRowIds = new ArrayList<RowId>(ids.size());
        for (Serializable id : ids) {
            hierRowIds.add(new RowId(Model.HIER_TABLE_NAME, id));
        }

        List<Fragment> hierFragments = context.getMulti(hierRowIds, false);

        // find available paths
        Map<Serializable, String> paths = new HashMap<Serializable, String>();
        Set<Serializable> parentIds = new HashSet<Serializable>();
        for (Fragment fragment : hierFragments) {
            Serializable id = fragment.getId();
            PathAndId pathOrId = context.getPathOrMissingParentId((SimpleFragment) fragment, false);
            // find missing fragments
            if (pathOrId.path != null) {
                paths.put(id, pathOrId.path);
            } else {
                parentIds.add(pathOrId.id);
            }
        }
        // fetch the missing parents and their ancestors in bulk
        if (!parentIds.isEmpty()) {
            // fetch them in the context
            getHierarchyAndAncestors(parentIds);
            // compute missing paths using context
            for (Fragment fragment : hierFragments) {
                Serializable id = fragment.getId();
                if (paths.containsKey(id)) {
                    continue;
                }
                String path = context.getPath((SimpleFragment) fragment);
                paths.put(id, path);
            }
        }

        // prepare fragment groups to build nodes
        Map<Serializable, FragmentGroup> fragmentGroups = new HashMap<Serializable, FragmentGroup>(ids.size());
        for (Fragment fragment : hierFragments) {
            Serializable id = fragment.row.id;
            fragmentGroups.put(id, new FragmentGroup((SimpleFragment) fragment, new FragmentsMap()));
        }

        if (prefetch) {
            List<RowId> bulkRowIds = new ArrayList<RowId>();
            Set<Serializable> proxyIds = new HashSet<Serializable>();

            // get rows to prefetch for hier fragments
            for (Fragment fragment : hierFragments) {
                findPrefetchedFragments((SimpleFragment) fragment, bulkRowIds, proxyIds);
            }

            // proxies

            // get proxies fragments
            List<RowId> proxiesRowIds = new ArrayList<RowId>(proxyIds.size());
            for (Serializable id : proxyIds) {
                proxiesRowIds.add(new RowId(Model.PROXY_TABLE_NAME, id));
            }
            List<Fragment> proxiesFragments = context.getMulti(proxiesRowIds, true);
            Set<Serializable> targetIds = new HashSet<Serializable>();
            for (Fragment fragment : proxiesFragments) {
                Serializable targetId = ((SimpleFragment) fragment).get(Model.PROXY_TARGET_KEY);
                targetIds.add(targetId);
            }

            // get hier fragments for proxies' targets
            targetIds.removeAll(ids); // only those we don't have already
            hierRowIds = new ArrayList<RowId>(targetIds.size());
            for (Serializable id : targetIds) {
                hierRowIds.add(new RowId(Model.HIER_TABLE_NAME, id));
            }
            hierFragments = context.getMulti(hierRowIds, true);
            for (Fragment fragment : hierFragments) {
                findPrefetchedFragments((SimpleFragment) fragment, bulkRowIds, null);
            }

            // we have everything to be prefetched

            // fetch all the prefetches in bulk
            List<Fragment> fragments = context.getMulti(bulkRowIds, true);

            // put each fragment in the map of the proper group
            for (Fragment fragment : fragments) {
                FragmentGroup fragmentGroup = fragmentGroups.get(fragment.row.id);
                if (fragmentGroup != null) {
                    fragmentGroup.fragments.put(fragment.row.tableName, fragment);
                }
            }
        }

        // assemble nodes from the fragment groups
        List<Node> nodes = new ArrayList<Node>(ids.size());
        for (Serializable id : ids) {
            FragmentGroup fragmentGroup = fragmentGroups.get(id);
            // null if deleted/absent
            Node node = fragmentGroup == null ? null : new Node(context, fragmentGroup, paths.get(id));
            nodes.add(node);
        }

        return nodes;
    }
711
    /**
     * Finds prefetched fragments for a hierarchy fragment, takes note of the ones that are proxies.
     *
     * @param hierFragment the hierarchy fragment to examine
     * @param bulkRowIds accumulator receiving the row ids to fetch in bulk
     * @param proxyIds accumulator receiving proxy document ids, or {@code null} when not needed
     */
    protected void findPrefetchedFragments(SimpleFragment hierFragment, List<RowId> bulkRowIds,
            Set<Serializable> proxyIds) {
        Serializable id = hierFragment.row.id;

        // find type
        String typeName = (String) hierFragment.get(Model.MAIN_PRIMARY_TYPE_KEY);
        if (Model.PROXY_TYPE.equals(typeName)) {
            // proxies are recorded separately; their own fragments are fetched later
            if (proxyIds != null) {
                proxyIds.add(id);
            }
            return;
        }

        // find table names
        Set<String> tableNames = model.getTypePrefetchedFragments(typeName);
        if (tableNames == null) {
            return; // unknown (obsolete) type
        }

        // add row id for each table name
        Serializable parentId = hierFragment.get(Model.HIER_PARENT_KEY);
        for (String tableName : tableNames) {
            if (Model.HIER_TABLE_NAME.equals(tableName)) {
                continue; // already fetched
            }
            if (parentId != null && Model.VERSION_TABLE_NAME.equals(tableName)) {
                continue; // not a version, don't fetch this table
                // TODO incorrect if we have filed versions
            }
            bulkRowIds.add(new RowId(tableName, id));
        }
    }
747
    /** Gets several nodes by ids, with prefetching; see {@link #getNodesByIds(List, boolean)}. */
    @Override
    public List<Node> getNodesByIds(List<Serializable> ids) {
        checkLive();
        return getNodesByIds(ids, true);
    }
753
754    @Override
755    public Node getParentNode(Node node) {
756        checkLive();
757        if (node == null) {
758            throw new IllegalArgumentException("Illegal null node");
759        }
760        Serializable id = node.getHierFragment().get(Model.HIER_PARENT_KEY);
761        return id == null ? null : getNodeById(id);
762    }
763
764    @Override
765    public String getPath(Node node) {
766        checkLive();
767        String path = node.getPath();
768        if (path == null) {
769            path = context.getPath(node.getHierFragment());
770        }
771        return path;
772    }
773
    /**
     * Normalize using NFC to avoid decomposed characters (like 'e' + COMBINING ACUTE ACCENT instead of LATIN SMALL
     * LETTER E WITH ACUTE). NFKC (normalization using compatibility decomposition) is not used, because compatibility
     * decomposition turns some characters (LATIN SMALL LIGATURE FFI, TRADE MARK SIGN, FULLWIDTH SOLIDUS) into a series
     * of characters ('f'+'f'+'i', 'T'+'M', '/') that cannot be re-composed into the original, and therefore loses
     * information.
     *
     * @param path the path to normalize
     * @return the NFC-normalized path
     */
    protected String normalize(String path) {
        return Normalizer.normalize(path, Normalizer.Form.NFC);
    }
784
785    /* Does not apply to properties for now (no use case). */
786    @Override
787    public Node getNodeByPath(String path, Node node) {
788        // TODO optimize this to use a dedicated path-based table
789        checkLive();
790        if (path == null) {
791            throw new IllegalArgumentException("Illegal null path");
792        }
793        path = normalize(path);
794        int i;
795        if (path.startsWith("/")) {
796            node = getRootNode();
797            if (path.equals("/")) {
798                return node;
799            }
800            i = 1;
801        } else {
802            if (node == null) {
803                throw new IllegalArgumentException("Illegal relative path with null node: " + path);
804            }
805            i = 0;
806        }
807        String[] names = path.split("/", -1);
808        for (; i < names.length; i++) {
809            String name = names[i];
810            if (name.length() == 0) {
811                throw new IllegalArgumentException("Illegal path with empty component: " + path);
812            }
813            node = getChildNode(node, name, false);
814            if (node == null) {
815                return null;
816            }
817        }
818        return node;
819    }
820
821    @Override
822    public boolean addMixinType(Node node, String mixin) {
823        if (model.getMixinPropertyInfos(mixin) == null) {
824            throw new IllegalArgumentException("No such mixin: " + mixin);
825        }
826        if (model.getDocumentTypeFacets(node.getPrimaryType()).contains(mixin)) {
827            return false; // already present in type
828        }
829        List<String> list = new ArrayList<String>(Arrays.asList(node.getMixinTypes()));
830        if (list.contains(mixin)) {
831            return false; // already present in node
832        }
833        list.add(mixin);
834        String[] mixins = list.toArray(new String[list.size()]);
835        node.hierFragment.put(Model.MAIN_MIXIN_TYPES_KEY, mixins);
836        // immediately create child nodes (for complex properties) in order
837        // to avoid concurrency issue later on
838        Map<String, String> childrenTypes = model.getMixinComplexChildren(mixin);
839        for (Entry<String, String> es : childrenTypes.entrySet()) {
840            String childName = es.getKey();
841            String childType = es.getValue();
842            addChildNode(node, childName, null, childType, true);
843        }
844        return true;
845    }
846
847    @Override
848    public boolean removeMixinType(Node node, String mixin) {
849        List<String> list = new ArrayList<String>(Arrays.asList(node.getMixinTypes()));
850        if (!list.remove(mixin)) {
851            return false; // not present in node
852        }
853        String[] mixins = list.toArray(new String[list.size()]);
854        if (mixins.length == 0) {
855            mixins = null;
856        }
857        node.hierFragment.put(Model.MAIN_MIXIN_TYPES_KEY, mixins);
858        // remove child nodes
859        Map<String, String> childrenTypes = model.getMixinComplexChildren(mixin);
860        for (String childName : childrenTypes.keySet()) {
861            Node child = getChildNode(node, childName, true);
862            removePropertyNode(child);
863        }
864        node.clearCache();
865        return true;
866    }
867
868    @Override
869    public Node addChildNode(Node parent, String name, Long pos, String typeName, boolean complexProp) {
870        if (pos == null && !complexProp && parent != null) {
871            pos = context.getNextPos(parent.getId(), complexProp);
872        }
873        return addChildNode(null, parent, name, pos, typeName, complexProp);
874    }
875
876    @Override
877    public Node addChildNode(Serializable id, Node parent, String name, Long pos, String typeName,
878            boolean complexProp) {
879        checkLive();
880        if (name == null) {
881            throw new IllegalArgumentException("Illegal null name");
882        }
883        name = normalize(name);
884        if (name.contains("/") || name.equals(".") || name.equals("..")) {
885            throw new IllegalArgumentException("Illegal name: " + name);
886        }
887        if (!model.isType(typeName)) {
888            throw new IllegalArgumentException("Unknown type: " + typeName);
889        }
890        id = generateNewId(id);
891        Serializable parentId = parent == null ? null : parent.hierFragment.getId();
892        Node node = addNode(id, parentId, name, pos, typeName, complexProp);
893        // immediately create child nodes (for complex properties) in order
894        // to avoid concurrency issue later on
895        Map<String, String> childrenTypes = model.getTypeComplexChildren(typeName);
896        for (Entry<String, String> es : childrenTypes.entrySet()) {
897            String childName = es.getKey();
898            String childType = es.getValue();
899            addChildNode(node, childName, null, childType, true);
900        }
901        return node;
902    }
903
904    protected Node addNode(Serializable id, Serializable parentId, String name, Long pos, String typeName,
905            boolean complexProp) {
906        requireReadAclsUpdate();
907        // main info
908        Row hierRow = new Row(Model.HIER_TABLE_NAME, id);
909        hierRow.putNew(Model.HIER_PARENT_KEY, parentId);
910        hierRow.putNew(Model.HIER_CHILD_NAME_KEY, name);
911        hierRow.putNew(Model.HIER_CHILD_POS_KEY, pos);
912        hierRow.putNew(Model.MAIN_PRIMARY_TYPE_KEY, typeName);
913        hierRow.putNew(Model.HIER_CHILD_ISPROPERTY_KEY, Boolean.valueOf(complexProp));
914        SimpleFragment hierFragment = context.createHierarchyFragment(hierRow);
915        FragmentGroup fragmentGroup = new FragmentGroup(hierFragment, new FragmentsMap());
916        return new Node(context, fragmentGroup, context.getPath(hierFragment));
917    }
918
919    @Override
920    public Node addProxy(Serializable targetId, Serializable versionableId, Node parent, String name, Long pos) {
921        if (!repository.getRepositoryDescriptor().getProxiesEnabled()) {
922            throw new NuxeoException("Proxies are disabled by configuration");
923        }
924        Node proxy = addChildNode(parent, name, pos, Model.PROXY_TYPE, false);
925        proxy.setSimpleProperty(Model.PROXY_TARGET_PROP, targetId);
926        proxy.setSimpleProperty(Model.PROXY_VERSIONABLE_PROP, versionableId);
927        SimpleFragment proxyFragment = (SimpleFragment) proxy.fragments.get(Model.PROXY_TABLE_NAME);
928        context.createdProxyFragment(proxyFragment);
929        return proxy;
930    }
931
932    @Override
933    public void setProxyTarget(Node proxy, Serializable targetId) {
934        if (!repository.getRepositoryDescriptor().getProxiesEnabled()) {
935            throw new NuxeoException("Proxies are disabled by configuration");
936        }
937        SimpleProperty prop = proxy.getSimpleProperty(Model.PROXY_TARGET_PROP);
938        Serializable oldTargetId = prop.getValue();
939        if (!oldTargetId.equals(targetId)) {
940            SimpleFragment proxyFragment = (SimpleFragment) proxy.fragments.get(Model.PROXY_TABLE_NAME);
941            context.removedProxyTarget(proxyFragment);
942            proxy.setSimpleProperty(Model.PROXY_TARGET_PROP, targetId);
943            context.addedProxyTarget(proxyFragment);
944        }
945    }
946
947    @Override
948    public boolean hasChildNode(Node parent, String name, boolean complexProp) {
949        checkLive();
950        // TODO could optimize further by not fetching the fragment at all
951        SimpleFragment fragment = context.getChildHierByName(parent.getId(), normalize(name), complexProp);
952        return fragment != null;
953    }
954
955    @Override
956    public Node getChildNode(Node parent, String name, boolean complexProp) {
957        checkLive();
958        if (name == null || name.contains("/") || name.equals(".") || name.equals("..")) {
959            throw new IllegalArgumentException("Illegal name: " + name);
960        }
961        SimpleFragment fragment = context.getChildHierByName(parent.getId(), name, complexProp);
962        return fragment == null ? null : getNodeById(fragment.getId());
963    }
964
    // TODO optimize with dedicated backend call
    /**
     * Checks whether a node has children, either complex properties or regular documents.
     * <p>
     * For regular children, entries with obsolete types (and proxies whose target is missing or deleted) are skipped,
     * to stay consistent with what {@code getChildren} would return.
     */
    @Override
    public boolean hasChildren(Node parent, boolean complexProp) {
        checkLive();
        List<SimpleFragment> children = context.getChildren(parent.getId(), null, complexProp);
        if (complexProp) {
            // complex properties are not filtered by type below; any child counts
            return !children.isEmpty();
        }
        if (children.isEmpty()) {
            return false;
        }
        // we have to check that type names are not obsolete, as they wouldn't be returned
        // by getChildren and we must be consistent
        SchemaManager schemaManager = Framework.getService(SchemaManager.class);
        for (SimpleFragment simpleFragment : children) {
            String primaryType = simpleFragment.getString(Model.MAIN_PRIMARY_TYPE_KEY);
            if (primaryType.equals(Model.PROXY_TYPE)) {
                // for a proxy, the effective type is the proxy target's primary type
                Node node = getNodeById(simpleFragment.getId(), false);
                Serializable targetId = node.getSimpleProperty(Model.PROXY_TARGET_PROP).getValue();
                if (targetId == null) {
                    // missing target, should not happen, ignore
                    continue;
                }
                Node target = getNodeById(targetId, false);
                if (target == null) {
                    // target deleted, ignore this proxy
                    continue;
                }
                primaryType = target.getPrimaryType();
            }
            DocumentType type = schemaManager.getDocumentType(primaryType);
            if (type == null) {
                // obsolete type, ignored in getChildren
                continue;
            }
            return true;
        }
        return false;
    }
1003
1004    @Override
1005    public List<Node> getChildren(Node parent, String name, boolean complexProp) {
1006        checkLive();
1007        List<SimpleFragment> fragments = context.getChildren(parent.getId(), name, complexProp);
1008        List<Node> nodes = new ArrayList<Node>(fragments.size());
1009        for (SimpleFragment fragment : fragments) {
1010            Node node = getNodeById(fragment.getId());
1011            if (node == null) {
1012                // cannot happen
1013                log.error("Child node cannot be created: " + fragment.getId());
1014                continue;
1015            }
1016            nodes.add(node);
1017        }
1018        return nodes;
1019    }
1020
1021    @Override
1022    public void orderBefore(Node parent, Node source, Node dest) {
1023        checkLive();
1024        context.orderBefore(parent.getId(), source.getId(), dest == null ? null : dest.getId());
1025    }
1026
1027    @Override
1028    public Node move(Node source, Node parent, String name) {
1029        checkLive();
1030        if (!parent.getId().equals(source.getParentId())) {
1031            flush(); // needed when doing many moves for circular stuff
1032        }
1033        context.move(source, parent.getId(), name);
1034        requireReadAclsUpdate();
1035        return source;
1036    }
1037
1038    @Override
1039    public Node copy(Node source, Node parent, String name) {
1040        checkLive();
1041        flush();
1042        Serializable id = context.copy(source, parent.getId(), name);
1043        requireReadAclsUpdate();
1044        return getNodeById(id);
1045    }
1046
1047    @Override
1048    public void removeNode(Node node) {
1049        checkLive();
1050        flush();
1051        // remove the lock using the lock manager
1052        // TODO children locks?
1053        getLockManager().removeLock(model.idToString(node.getId()), null);
1054        context.removeNode(node.getHierFragment());
1055    }
1056
    /**
     * Removes a complex-property node; unlike {@link #removeNode} this needs no flush and no lock removal.
     */
    @Override
    public void removePropertyNode(Node node) {
        checkLive();
        // no flush needed
        context.removePropertyNode(node.getHierFragment());
    }
1063
1064    @Override
1065    public Node checkIn(Node node, String label, String checkinComment) {
1066        checkLive();
1067        flush();
1068        Serializable id = context.checkIn(node, label, checkinComment);
1069        requireReadAclsUpdate();
1070        // save to reflect changes immediately in database
1071        flush();
1072        return getNodeById(id);
1073    }
1074
    /**
     * Checks out a node, making it editable again after a check-in.
     */
    @Override
    public void checkOut(Node node) {
        checkLive();
        context.checkOut(node);
        requireReadAclsUpdate();
    }
1081
    /**
     * Restores a node to the state of the given version.
     */
    @Override
    public void restore(Node node, Node version) {
        checkLive();
        // save done inside method
        context.restoreVersion(node, version);
        requireReadAclsUpdate();
    }
1089
1090    @Override
1091    public Node getVersionByLabel(Serializable versionSeriesId, String label) {
1092        if (label == null) {
1093            return null;
1094        }
1095        List<Node> versions = getVersions(versionSeriesId);
1096        for (Node node : versions) {
1097            String l = (String) node.getSimpleProperty(Model.VERSION_LABEL_PROP).getValue();
1098            if (label.equals(l)) {
1099                return node;
1100            }
1101        }
1102        return null;
1103    }
1104
1105    @Override
1106    public Node getLastVersion(Serializable versionSeriesId) {
1107        checkLive();
1108        List<Serializable> ids = context.getVersionIds(versionSeriesId);
1109        return ids.isEmpty() ? null : getNodeById(ids.get(ids.size() - 1));
1110    }
1111
1112    @Override
1113    public List<Node> getVersions(Serializable versionSeriesId) {
1114        checkLive();
1115        List<Serializable> ids = context.getVersionIds(versionSeriesId);
1116        List<Node> nodes = new ArrayList<Node>(ids.size());
1117        for (Serializable id : ids) {
1118            nodes.add(getNodeById(id));
1119        }
1120        return nodes;
1121    }
1122
1123    @Override
1124    public List<Node> getProxies(Node document, Node parent) {
1125        checkLive();
1126        if (!repository.getRepositoryDescriptor().getProxiesEnabled()) {
1127            return Collections.emptyList();
1128        }
1129
1130        List<Serializable> ids;
1131        if (document.isVersion()) {
1132            ids = context.getTargetProxyIds(document.getId());
1133        } else {
1134            Serializable versionSeriesId;
1135            if (document.isProxy()) {
1136                versionSeriesId = document.getSimpleProperty(Model.PROXY_VERSIONABLE_PROP).getValue();
1137            } else {
1138                versionSeriesId = document.getId();
1139            }
1140            ids = context.getSeriesProxyIds(versionSeriesId);
1141        }
1142
1143        List<Node> nodes = new LinkedList<Node>();
1144        for (Serializable id : ids) {
1145            Node node = getNodeById(id);
1146            if (node != null || Boolean.TRUE.booleanValue()) { // XXX
1147                // null if deleted, which means selection wasn't correctly
1148                // updated
1149                nodes.add(node);
1150            }
1151        }
1152
1153        if (parent != null) {
1154            // filter by parent
1155            Serializable parentId = parent.getId();
1156            for (Iterator<Node> it = nodes.iterator(); it.hasNext();) {
1157                Node node = it.next();
1158                if (!parentId.equals(node.getParentId())) {
1159                    it.remove();
1160                }
1161            }
1162        }
1163
1164        return nodes;
1165    }
1166
1167    /**
1168     * Fetches the hierarchy fragment for the given rows and all their ancestors.
1169     *
1170     * @param ids the fragment ids
1171     */
1172    protected List<Fragment> getHierarchyAndAncestors(Collection<Serializable> ids) {
1173        Set<Serializable> allIds = mapper.getAncestorsIds(ids);
1174        allIds.addAll(ids);
1175        List<RowId> rowIds = new ArrayList<RowId>(allIds.size());
1176        for (Serializable id : allIds) {
1177            rowIds.add(new RowId(Model.HIER_TABLE_NAME, id));
1178        }
1179        return context.getMulti(rowIds, true);
1180    }
1181
1182    @Override
1183    public PartialList<Serializable> query(String query, QueryFilter queryFilter, boolean countTotal) {
1184        final Timer.Context timerContext = queryTimer.time();
1185        try {
1186            return mapper.query(query, NXQL.NXQL, queryFilter, countTotal);
1187        } finally {
1188            timerContext.stop();
1189        }
1190    }
1191
1192    @Override
1193    public PartialList<Serializable> query(String query, String queryType, QueryFilter queryFilter, long countUpTo) {
1194        final Timer.Context timerContext = queryTimer.time();
1195        try {
1196            return mapper.query(query, queryType, queryFilter, countUpTo);
1197        } finally {
1198            long duration = timerContext.stop();
1199            if ((LOG_MIN_DURATION_NS >= 0) && (duration > LOG_MIN_DURATION_NS)) {
1200                String msg = String.format("duration_ms:\t%.2f\t%s %s\tquery\t%s", duration / 1000000.0, queryFilter,
1201                        countUpToAsString(countUpTo), query);
1202                if (log.isTraceEnabled()) {
1203                    log.info(msg, new Throwable("Slow query stack trace"));
1204                } else {
1205                    log.info(msg);
1206                }
1207            }
1208        }
1209    }
1210
1211    private String countUpToAsString(long countUpTo) {
1212        if (countUpTo > 0) {
1213            return String.format("count total results up to %d", countUpTo);
1214        }
1215        return countUpTo == -1 ? "count total results UNLIMITED" : "";
1216    }
1217
1218    @Override
1219    public IterableQueryResult queryAndFetch(String query, String queryType, QueryFilter queryFilter,
1220            Object... params) {
1221        final Timer.Context timerContext = queryTimer.time();
1222        try {
1223            return mapper.queryAndFetch(query, queryType, queryFilter, params);
1224        } finally {
1225            long duration = timerContext.stop();
1226            if ((LOG_MIN_DURATION_NS >= 0) && (duration > LOG_MIN_DURATION_NS)) {
1227                String msg = String.format("duration_ms:\t%.2f\t%s\tqueryAndFetch\t%s", duration / 1000000.0,
1228                        queryFilter, query);
1229                if (log.isTraceEnabled()) {
1230                    log.info(msg, new Throwable("Slow query stack trace"));
1231                } else {
1232                    log.info(msg);
1233                }
1234            }
1235        }
1236    }
1237
    /** Gets the lock manager of this session's repository. */
    @Override
    public LockManager getLockManager() {
        return repository.getLockManager();
    }
1242
    /** Flags that the read ACLs are out of date; {@link #updateReadAcls} will recompute them. */
    @Override
    public void requireReadAclsUpdate() {
        readAclsChanged = true;
    }
1247
    /** Updates the read ACLs through the mapper, timing the operation. */
    @Override
    public void updateReadAcls() {
        final Timer.Context timerContext = aclrUpdateTimer.time();
        try {
            mapper.updateReadAcls();
            // clear the dirty flag only if the update succeeded
            readAclsChanged = false;
        } finally {
            timerContext.stop();
        }
    }
1258
    /** Rebuilds the read ACLs from scratch through the mapper, then clears the dirty flag. */
    @Override
    public void rebuildReadAcls() {
        mapper.rebuildReadAcls();
        readAclsChanged = false;
    }
1264
1265    private void computeRootNode() {
1266        String repositoryId = repository.getName();
1267        Serializable rootId = mapper.getRootId(repositoryId);
1268        if (rootId == null && COMPAT_REPOSITORY_NAME) {
1269            // compat, old repositories had fixed id "default"
1270            rootId = mapper.getRootId("default");
1271        }
1272        if (rootId == null) {
1273            log.debug("Creating root");
1274            rootNode = addRootNode();
1275            addRootACP();
1276            save();
1277            // record information about the root id
1278            mapper.setRootId(repositoryId, rootNode.getId());
1279        } else {
1280            rootNode = getNodeById(rootId, false);
1281        }
1282    }
1283
1284    // TODO factor with addChildNode
1285    private Node addRootNode() {
1286        Serializable id = generateNewId(null);
1287        return addNode(id, null, "", null, Model.ROOT_TYPE, false);
1288    }
1289
1290    private void addRootACP() {
1291        ACLRow[] aclrows = new ACLRow[3];
1292        // TODO put groups in their proper place. like that now for consistency.
1293        aclrows[0] = new ACLRow(0, ACL.LOCAL_ACL, true, SecurityConstants.EVERYTHING, SecurityConstants.ADMINISTRATORS,
1294                null);
1295        aclrows[1] = new ACLRow(1, ACL.LOCAL_ACL, true, SecurityConstants.EVERYTHING, SecurityConstants.ADMINISTRATOR,
1296                null);
1297        aclrows[2] = new ACLRow(2, ACL.LOCAL_ACL, true, SecurityConstants.READ, SecurityConstants.MEMBERS, null);
1298        rootNode.setCollectionProperty(Model.ACL_PROP, aclrows);
1299        requireReadAclsUpdate();
1300    }
1301
1302    // public Node newNodeInstance() needed ?
1303
1304    public void checkPermission(String absPath, String actions) {
1305        checkLive();
1306        // TODO Auto-generated method stub
1307        throw new RuntimeException("Not implemented");
1308    }
1309
1310    public boolean hasPendingChanges() {
1311        checkLive();
1312        // TODO Auto-generated method stub
1313        throw new RuntimeException("Not implemented");
1314    }
1315
    /** Marks the binaries referenced in the database; delegates to the mapper. */
    public void markReferencedBinaries() {
        checkLive();
        mapper.markReferencedBinaries();
    }
1320
1321    public int cleanupDeletedDocuments(int max, Calendar beforeTime) {
1322        checkLive();
1323        if (!repository.getRepositoryDescriptor().getSoftDeleteEnabled()) {
1324            return 0;
1325        }
1326        return mapper.cleanupDeletedRows(max, beforeTime);
1327    }
1328
1329    /*
1330     * ----- XAResource -----
1331     */
1332
1333    @Override
1334    public boolean isSameRM(XAResource xaresource) {
1335        return xaresource == this;
1336    }
1337
    /**
     * Starts work on a transaction branch; on a brand-new transaction (TMNOFLAGS, i.e. not a join/resume), pending
     * invalidations from other sessions are processed first.
     */
    @Override
    public void start(Xid xid, int flags) throws XAException {
        if (flags == TMNOFLAGS) {
            try {
                processReceivedInvalidations();
            } catch (NuxeoException e) {
                log.error("Could not start transaction", e);
                // wrap as an XA resource-manager error, keeping the cause
                throw (XAException) new XAException(XAException.XAER_RMERR).initCause(e);
            }
        }
        mapper.start(xid, flags);
        inTransaction = true;
        checkThreadStart();
    }
1352
    /**
     * Ends work on a transaction branch; unless the transaction already failed (TMFAIL), pending session changes are
     * flushed first. A concurrent-update conflict during flush marks the transaction rollback-only instead of
     * throwing; any other flush error ends the branch as failed.
     */
    @Override
    public void end(Xid xid, int flags) throws XAException {
        boolean failed = true;
        try {
            if (flags != TMFAIL) {
                try {
                    flush();
                } catch (ConcurrentUpdateException e) {
                    TransactionHelper.noteSuppressedException(e);
                    log.debug("Exception during transaction commit", e);
                    // set rollback only manually instead of throwing, this avoids
                    // a spurious log in Geronimo TransactionImpl and has the same effect
                    TransactionHelper.setTransactionRollbackOnly();
                    return;
                } catch (NuxeoException e) {
                    log.error("Exception during transaction commit", e);
                    throw (XAException) new XAException(XAException.XAER_RMERR).initCause(e);
                }
            }
            failed = false;
            mapper.end(xid, flags);
        } finally {
            if (failed) {
                // flush failed: still end the branch, marking it failed
                mapper.end(xid, TMFAIL);
                // rollback done by tx manager
            }
        }
    }
1381
1382    @Override
1383    public int prepare(Xid xid) throws XAException {
1384        int res = mapper.prepare(xid);
1385        if (res == XA_RDONLY) {
1386            // Read-only optimization, commit() won't be called by the TM.
1387            // It's important to nevertheless send invalidations because
1388            // Oracle, in tightly-coupled transaction mode, can return
1389            // this status even when some changes were actually made
1390            // (they just will be committed by another resource).
1391            // See NXP-7943
1392            commitDone();
1393        }
1394        return res;
1395    }
1396
    /**
     * Commits the transaction branch through the mapper; invalidations are sent (via {@link #commitDone}) even if the
     * mapper commit throws.
     */
    @Override
    public void commit(Xid xid, boolean onePhase) throws XAException {
        try {
            mapper.commit(xid, onePhase);
        } finally {
            commitDone();
        }
    }
1405
    /**
     * Marks the transaction finished, sends invalidations to other sessions and runs end-of-thread checks. Called
     * after commit, and after a read-only prepare (see {@link #prepare}).
     */
    protected void commitDone() throws XAException {
        inTransaction = false;
        try {
            try {
                sendInvalidationsToOthers();
            } finally {
                // thread-end checks always run, even if sending invalidations failed
                checkThreadEnd();
            }
        } catch (NuxeoException e) {
            log.error("Could not send invalidations", e);
            throw (XAException) new XAException(XAException.XAER_RMERR).initCause(e);
        }
    }
1419
    /**
     * Rolls back the transaction branch, then the session-level state, and finally runs end-of-thread checks; each
     * step runs even if the previous one threw.
     */
    @Override
    public void rollback(Xid xid) throws XAException {
        try {
            try {
                mapper.rollback(xid);
            } finally {
                // roll back the session state even if the mapper rollback failed
                rollback();
            }
        } finally {
            inTransaction = false;
            // no invalidations to send
            checkThreadEnd();
        }
    }
1434
    /** Forgets a heuristically completed transaction branch; delegates to the mapper. */
    @Override
    public void forget(Xid xid) throws XAException {
        mapper.forget(xid);
    }

    /** Returns the prepared transaction branches for recovery; delegates to the mapper. */
    @Override
    public Xid[] recover(int flag) throws XAException {
        return mapper.recover(flag);
    }

    /** Sets the transaction timeout on the underlying mapper resource. */
    @Override
    public boolean setTransactionTimeout(int seconds) throws XAException {
        return mapper.setTransactionTimeout(seconds);
    }

    /** Gets the transaction timeout from the underlying mapper resource. */
    @Override
    public int getTransactionTimeout() throws XAException {
        return mapper.getTransactionTimeout();
    }
1454
    /** Gets the overall cache size reported by the context (monitoring). */
    public long getCacheSize() {
        return context.getCacheSize();
    }

    /** Gets the mapper-level cache size reported by the context (monitoring). */
    public long getCacheMapperSize() {
        return context.getCacheMapperSize();
    }

    /** Gets the pristine-cache size reported by the context (monitoring). */
    public long getCachePristineSize() {
        return context.getCachePristineSize();
    }

    /** Gets the selection-cache size reported by the context (monitoring). */
    public long getCacheSelectionSize() {
        return context.getCacheSelectionSize();
    }
1470
1471    @Override
1472    public Map<String, String> getBinaryFulltext(Serializable id) {
1473        if (repository.getRepositoryDescriptor().getFulltextDisabled()) {
1474            return null;
1475        }
1476        RowId rowId = new RowId(Model.FULLTEXT_TABLE_NAME, id);
1477        return mapper.getBinaryFulltext(rowId);
1478    }
1479
1480}