/*
 * (C) Copyright 2006-2018 Nuxeo (http://nuxeo.com/) and others.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Contributors:
 *     Florent Guillaume
 */
package org.nuxeo.ecm.core.storage.sql;

import java.io.Serializable;
import java.text.Normalizer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.stream.Collectors;

import javax.resource.ResourceException;
import javax.resource.cci.ConnectionMetaData;
import javax.resource.cci.Interaction;
import javax.resource.cci.LocalTransaction;
import javax.resource.cci.ResultSetInfo;
import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.nuxeo.ecm.core.api.ConcurrentUpdateException;
import org.nuxeo.ecm.core.api.DocumentExistsException;
import org.nuxeo.ecm.core.api.IterableQueryResult;
import org.nuxeo.ecm.core.api.NuxeoException;
import org.nuxeo.ecm.core.api.PartialList;
import org.nuxeo.ecm.core.api.ScrollResult;
import org.nuxeo.ecm.core.api.repository.FulltextConfiguration;
import org.nuxeo.ecm.core.api.repository.RepositoryManager;
import org.nuxeo.ecm.core.api.security.ACL;
import org.nuxeo.ecm.core.api.security.SecurityConstants;
import org.nuxeo.ecm.core.model.LockManager;
import org.nuxeo.ecm.core.query.QueryFilter;
import org.nuxeo.ecm.core.query.sql.NXQL;
import org.nuxeo.ecm.core.schema.DocumentType;
import org.nuxeo.ecm.core.schema.SchemaManager;
import org.nuxeo.ecm.core.storage.FulltextExtractorWork;
import org.nuxeo.ecm.core.storage.sql.PersistenceContext.PathAndId;
import org.nuxeo.ecm.core.storage.sql.RowMapper.NodeInfo;
import org.nuxeo.ecm.core.storage.sql.RowMapper.RowBatch;
import org.nuxeo.ecm.core.work.api.Work;
import org.nuxeo.ecm.core.work.api.WorkManager;
import org.nuxeo.ecm.core.work.api.WorkManager.Scheduling;
import org.nuxeo.runtime.api.Framework;
import org.nuxeo.runtime.metrics.MetricsService;
import org.nuxeo.runtime.transaction.TransactionHelper;

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.SharedMetricRegistries;
import com.codahale.metrics.Timer;

/**
 * The session is the main high-level access point to data from the underlying database.
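 * <p>
 * It wraps a {@link Mapper} for database access and a {@link PersistenceContext} holding the in-memory state, and
 * implements {@link XAResource} so that it can be enlisted in transactions.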
 */
public class SessionImpl implements Session, XAResource {

    private static final Log log = LogFactory.getLog(SessionImpl.class);

    /**
     * Set this system property to false if you don't want repositories to be looked up under the compatibility name
     * "default" in the "repositories" table.
     * <p>
     * Only do this if you start from an empty database, or if you have migrated the "repositories" table by hand, or
     * if you need to create a new repository in a database already containing a "default" repository (table sharing,
     * not recommended).
     */
    public static final String COMPAT_REPOSITORY_NAME_KEY = "org.nuxeo.vcs.repository.name.default.compat";

    private static final boolean COMPAT_REPOSITORY_NAME = Boolean.parseBoolean(
            Framework.getProperty(COMPAT_REPOSITORY_NAME_KEY, "true"));

    protected final RepositoryImpl repository;

    private final Mapper mapper;

    private final Model model;

    // public because used by unit tests
    public final PersistenceContext context;

    protected final boolean changeTokenEnabled;

    private volatile boolean live;

    private boolean inTransaction;

    private Serializable rootNodeId;

    private long threadId;

    private String threadName;

    private Throwable threadStack;

    private boolean readAclsChanged;

    // @since 5.7
    protected final MetricRegistry registry = SharedMetricRegistries.getOrCreate(MetricsService.class.getName());

    private final Timer saveTimer;

    private final Timer queryTimer;

    private final Timer aclrUpdateTimer;

    private static final String LOG_MIN_DURATION_KEY = "org.nuxeo.vcs.query.log_min_duration_ms";

    private static final long LOG_MIN_DURATION_NS = Long.parseLong(Framework.getProperty(LOG_MIN_DURATION_KEY, "-1"))
            * 1000000;

    public SessionImpl(RepositoryImpl repository, Model model, Mapper mapper) {
        this.repository = repository;
        this.mapper = mapper;
        this.model = model;
        context = new PersistenceContext(model, mapper, this);
        changeTokenEnabled = repository.isChangeTokenEnabled();
        live = true;
        readAclsChanged = false;

        saveTimer = registry.timer(MetricRegistry.name("nuxeo", "repositories", repository.getName(), "saves"));
        queryTimer = registry.timer(MetricRegistry.name("nuxeo", "repositories", repository.getName(), "queries"));
        aclrUpdateTimer = registry.timer(
                MetricRegistry.name("nuxeo", "repositories", repository.getName(), "aclr-updates"));

        computeRootNode();
    }

    public void checkLive() {
        if (!live) {
            throw new IllegalStateException("Session is not live");
        }
        checkThread();
    }

    // called by NetServlet when forwarding remote NetMapper calls.
    @Override
    public Mapper getMapper() {
        return mapper;
    }

    /**
     * Gets the XAResource. Called by the ManagedConnectionImpl, which actually wraps it in a connection-aware
     * implementation.
     */
    public XAResource getXAResource() {
        return this;
    }

    /**
     * Clears all the caches. Called by RepositoryManagement.
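     * <p>
     * Returns 0 without clearing anything if the session is currently in a transaction, to avoid multi-threaded access
     * to an active session.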
177 */ 178 protected int clearCaches() { 179 if (inTransaction) { 180 // avoid potential multi-threaded access to active session 181 return 0; 182 } 183 checkThreadEnd(); 184 return context.clearCaches(); 185 } 186 187 protected PersistenceContext getContext() { 188 return context; 189 } 190 191 protected void rollback() { 192 context.clearCaches(); 193 } 194 195 protected void checkThread() { 196 if (threadId == 0) { 197 return; 198 } 199 long currentThreadId = Thread.currentThread().getId(); 200 if (threadId == currentThreadId) { 201 return; 202 } 203 String currentThreadName = Thread.currentThread().getName(); 204 String msg = String.format( 205 "Concurrency Error: Session was started in thread %s (%s)" + " but is being used in thread %s (%s)", 206 threadId, threadName, currentThreadId, currentThreadName); 207 throw new IllegalStateException(msg, threadStack); 208 } 209 210 protected void checkThreadStart() { 211 threadId = Thread.currentThread().getId(); 212 threadName = Thread.currentThread().getName(); 213 if (log.isDebugEnabled()) { 214 threadStack = new Throwable("owner stack trace"); 215 } 216 } 217 218 protected void checkThreadEnd() { 219 threadId = 0; 220 threadName = null; 221 threadStack = null; 222 } 223 224 /** 225 * Generates a new id, or used a pre-generated one (import). 226 */ 227 protected Serializable generateNewId(Serializable id) { 228 return context.generateNewId(id); 229 } 230 231 protected boolean isIdNew(Serializable id) { 232 return context.isIdNew(id); 233 } 234 235 /* 236 * ----- javax.resource.cci.Connection ----- 237 */ 238 239 @Override 240 public void close() throws ResourceException { 241 try { 242 checkLive(); 243 closeSession(); 244 repository.closeSession(this); 245 } catch (Exception cause) { 246 throw new ResourceException(cause); 247 } 248 } 249 250 protected void closeSession() { 251 live = false; 252 context.clearCaches(); 253 // close the mapper and therefore the connection 254 mapper.close(); 255 // don't clean the caches, we keep the pristine cache around 256 // TODO this is getting destroyed, we can clean everything 257 } 258 259 @Override 260 public Interaction createInteraction() throws ResourceException { 261 throw new UnsupportedOperationException(); 262 } 263 264 @Override 265 public LocalTransaction getLocalTransaction() throws ResourceException { 266 throw new UnsupportedOperationException(); 267 } 268 269 @Override 270 public ConnectionMetaData getMetaData() throws ResourceException { 271 throw new UnsupportedOperationException(); 272 } 273 274 @Override 275 public ResultSetInfo getResultSetInfo() throws ResourceException { 276 throw new UnsupportedOperationException(); 277 } 278 279 /* 280 * ----- Session ----- 281 */ 282 283 @Override 284 public boolean isLive() { 285 return live; 286 } 287 288 @Override 289 public String getRepositoryName() { 290 return repository.getName(); 291 } 292 293 @Override 294 public Model getModel() { 295 return model; 296 } 297 298 @Override 299 public Node getRootNode() { 300 checkLive(); 301 return getNodeById(rootNodeId); 302 } 303 304 @Override 305 public void save() { 306 final Timer.Context timerContext = saveTimer.time(); 307 try { 308 checkLive(); 309 flush(); 310 if (!inTransaction) { 311 sendInvalidationsToOthers(); 312 // as we don't have a way to know when the next 313 // non-transactional 314 // statement will start, process invalidations immediately 315 } 316 processReceivedInvalidations(); 317 } finally { 318 timerContext.stop(); 319 } 320 } 321 322 protected void flush() { 323 
        checkThread();
        List<Work> works;
        if (!repository.getRepositoryDescriptor().getFulltextDescriptor().getFulltextDisabled()) {
            works = getFulltextWorks();
        } else {
            works = Collections.emptyList();
        }
        doFlush();
        if (readAclsChanged) {
            updateReadAcls();
        }
        scheduleWork(works);
        checkInvalidationsConflict();
    }

    protected void scheduleWork(List<Work> works) {
        // do async fulltext indexing only if high-level sessions are available
        RepositoryManager repositoryManager = Framework.getService(RepositoryManager.class);
        if (repositoryManager != null && !works.isEmpty()) {
            WorkManager workManager = Framework.getService(WorkManager.class);
            for (Work work : works) {
                // schedule work post-commit
                // in non-tx mode, this may execute it nearly immediately
                workManager.schedule(work, Scheduling.IF_NOT_SCHEDULED, true);
            }
        }
    }

    protected void doFlush() {
        List<Fragment> fragmentsToClearDirty = new ArrayList<>(0);
        RowBatch batch = context.getSaveBatch(fragmentsToClearDirty);
        if (!batch.isEmpty()) {
            log.debug("Saving session");
            // execute the batch
            try {
                mapper.write(batch);
                log.debug("End of save");
            } finally {
                // callers must never observe a DeltaLong in the fragments
                for (Fragment fragment : fragmentsToClearDirty) {
                    fragment.clearDirty();
                }
            }
        }
    }

    protected Serializable getContainingDocument(Serializable id) {
        return context.getContainingDocument(id);
    }

    /**
     * Gets the fulltext updates to do. Called at save() time.
     *
     * @return a list of {@link Work} instances to schedule post-commit.
     */
    protected List<Work> getFulltextWorks() {
        Set<Serializable> dirtyStrings = new HashSet<>();
        Set<Serializable> dirtyBinaries = new HashSet<>();
        context.findDirtyDocuments(dirtyStrings, dirtyBinaries);
        if (model.getFulltextConfiguration().fulltextSearchDisabled) {
            // when fulltext search is disabled in the repository, Elasticsearch does its own
            // extraction/indexing of simple strings, so dirty simple strings can be ignored here;
            // dirty binaries must still be detected in all cases, because Elasticsearch needs their
            // extracted text even though the repository itself doesn't use it for search
            dirtyStrings = Collections.emptySet();
        }
        Set<Serializable> dirtyIds = new HashSet<>();
        dirtyIds.addAll(dirtyStrings);
        dirtyIds.addAll(dirtyBinaries);
        if (dirtyIds.isEmpty()) {
            return Collections.emptyList();
        }
        markIndexingInProgress(dirtyIds);
        List<Work> works = new ArrayList<>(dirtyIds.size());
        for (Serializable id : dirtyIds) {
            boolean updateSimpleText = dirtyStrings.contains(id);
            boolean updateBinaryText = dirtyBinaries.contains(id);
            Work work = new FulltextExtractorWork(repository.getName(), model.idToString(id), updateSimpleText,
                    updateBinaryText, true);
            works.add(work);
        }
        return works;
    }

    /**
     * Marks indexing in progress, so that future copies (including versions) will be indexed as well.
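     * <p>
     * The fulltext job id property of each fulltext-indexable dirty document is set to the document's own id.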
409 */ 410 protected void markIndexingInProgress(Set<Serializable> dirtyIds) { 411 FulltextConfiguration fulltextConfiguration = model.getFulltextConfiguration(); 412 for (Node node : getNodesByIds(dirtyIds)) { 413 if (!fulltextConfiguration.isFulltextIndexable(node.getPrimaryType())) { 414 continue; 415 } 416 node.getSimpleProperty(Model.FULLTEXT_JOBID_PROP).setValue(model.idToString(node.getId())); 417 } 418 } 419 420 /** 421 * Post-transaction invalidations notification. 422 * <p> 423 * Called post-transaction by session commit/rollback or transactionless save. 424 */ 425 protected void sendInvalidationsToOthers() { 426 context.sendInvalidationsToOthers(); 427 } 428 429 /** 430 * Processes all invalidations accumulated. 431 * <p> 432 * Called pre-transaction by start or transactionless save; 433 */ 434 protected void processReceivedInvalidations() { 435 context.processReceivedInvalidations(); 436 } 437 438 /** 439 * Post transaction check invalidations processing. 440 */ 441 protected void checkInvalidationsConflict() { 442 // repository.receiveClusterInvalidations(this); 443 context.checkInvalidationsConflict(); 444 } 445 446 /* 447 * ------------------------------------------------------------- 448 * ------------------------------------------------------------- 449 * ------------------------------------------------------------- 450 */ 451 452 protected Node getNodeById(Serializable id, boolean prefetch) { 453 List<Node> nodes = getNodesByIds(Collections.singletonList(id), prefetch); 454 Node node = nodes.get(0); 455 // ((JDBCMapper) ((CachingMapper) 456 // mapper).mapper).logger.log("getNodeById " + id + " -> " + (node == 457 // null ? "missing" : "found")); 458 return node; 459 } 460 461 @Override 462 public Node getNodeById(Serializable id) { 463 checkLive(); 464 if (id == null) { 465 throw new IllegalArgumentException("Illegal null id"); 466 } 467 return getNodeById(id, true); 468 } 469 470 public List<Node> getNodesByIds(Collection<Serializable> ids, boolean prefetch) { 471 // get hier fragments 472 List<RowId> hierRowIds = new ArrayList<>(ids.size()); 473 for (Serializable id : ids) { 474 hierRowIds.add(new RowId(Model.HIER_TABLE_NAME, id)); 475 } 476 477 List<Fragment> hierFragments = context.getMulti(hierRowIds, false); 478 479 // find available paths 480 Map<Serializable, String> paths = new HashMap<>(); 481 Set<Serializable> parentIds = new HashSet<>(); 482 for (Fragment fragment : hierFragments) { 483 Serializable id = fragment.getId(); 484 PathAndId pathOrId = context.getPathOrMissingParentId((SimpleFragment) fragment, false); 485 // find missing fragments 486 if (pathOrId.path != null) { 487 paths.put(id, pathOrId.path); 488 } else { 489 parentIds.add(pathOrId.id); 490 } 491 } 492 // fetch the missing parents and their ancestors in bulk 493 if (!parentIds.isEmpty()) { 494 // fetch them in the context 495 getHierarchyAndAncestors(parentIds); 496 // compute missing paths using context 497 for (Fragment fragment : hierFragments) { 498 Serializable id = fragment.getId(); 499 if (paths.containsKey(id)) { 500 continue; 501 } 502 String path = context.getPath((SimpleFragment) fragment); 503 paths.put(id, path); 504 } 505 } 506 507 // prepare fragment groups to build nodes 508 Map<Serializable, FragmentGroup> fragmentGroups = new HashMap<>(ids.size()); 509 for (Fragment fragment : hierFragments) { 510 Serializable id = fragment.row.id; 511 fragmentGroups.put(id, new FragmentGroup((SimpleFragment) fragment, new FragmentsMap())); 512 } 513 514 if (prefetch) { 515 List<RowId> 
            List<RowId> bulkRowIds = new ArrayList<>();
            Set<Serializable> proxyIds = new HashSet<>();

            // get rows to prefetch for hier fragments
            for (Fragment fragment : hierFragments) {
                findPrefetchedFragments((SimpleFragment) fragment, bulkRowIds, proxyIds);
            }

            // proxies

            // get proxies fragments
            List<RowId> proxiesRowIds = new ArrayList<>(proxyIds.size());
            for (Serializable id : proxyIds) {
                proxiesRowIds.add(new RowId(Model.PROXY_TABLE_NAME, id));
            }
            List<Fragment> proxiesFragments = context.getMulti(proxiesRowIds, true);
            Set<Serializable> targetIds = new HashSet<>();
            for (Fragment fragment : proxiesFragments) {
                Serializable targetId = ((SimpleFragment) fragment).get(Model.PROXY_TARGET_KEY);
                targetIds.add(targetId);
            }

            // get hier fragments for proxies' targets
            targetIds.removeAll(ids); // only those we don't have already
            hierRowIds = new ArrayList<>(targetIds.size());
            for (Serializable id : targetIds) {
                hierRowIds.add(new RowId(Model.HIER_TABLE_NAME, id));
            }
            hierFragments = context.getMulti(hierRowIds, true);
            for (Fragment fragment : hierFragments) {
                findPrefetchedFragments((SimpleFragment) fragment, bulkRowIds, null);
            }

            // we have everything to be prefetched

            // fetch all the prefetches in bulk
            List<Fragment> fragments = context.getMulti(bulkRowIds, true);

            // put each fragment in the map of the proper group
            for (Fragment fragment : fragments) {
                FragmentGroup fragmentGroup = fragmentGroups.get(fragment.row.id);
                if (fragmentGroup != null) {
                    fragmentGroup.fragments.put(fragment.row.tableName, fragment);
                }
            }
        }

        // assemble nodes from the fragment groups
        List<Node> nodes = new ArrayList<>(ids.size());
        for (Serializable id : ids) {
            FragmentGroup fragmentGroup = fragmentGroups.get(id);
            // null if deleted/absent
            Node node = fragmentGroup == null ? null : new Node(context, fragmentGroup, paths.get(id));
            nodes.add(node);
        }

        return nodes;
    }

    /**
     * Finds prefetched fragments for a hierarchy fragment, and takes note of the ones that are proxies.
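     * <p>
     * Proxy fragments are not prefetched directly; their ids are collected in {@code proxyIds} so that the caller can
     * resolve their targets and prefetch those instead.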
576 */ 577 protected void findPrefetchedFragments(SimpleFragment hierFragment, List<RowId> bulkRowIds, 578 Set<Serializable> proxyIds) { 579 Serializable id = hierFragment.row.id; 580 581 // find type 582 String typeName = (String) hierFragment.get(Model.MAIN_PRIMARY_TYPE_KEY); 583 if (Model.PROXY_TYPE.equals(typeName)) { 584 if (proxyIds != null) { 585 proxyIds.add(id); 586 } 587 return; 588 } 589 590 // find table names 591 Set<String> tableNames = model.getTypePrefetchedFragments(typeName); 592 if (tableNames == null) { 593 return; // unknown (obsolete) type 594 } 595 596 // add row id for each table name 597 Serializable parentId = hierFragment.get(Model.HIER_PARENT_KEY); 598 for (String tableName : tableNames) { 599 if (Model.HIER_TABLE_NAME.equals(tableName)) { 600 continue; // already fetched 601 } 602 if (parentId != null && Model.VERSION_TABLE_NAME.equals(tableName)) { 603 continue; // not a version, don't fetch this table 604 // TODO incorrect if we have filed versions 605 } 606 bulkRowIds.add(new RowId(tableName, id)); 607 } 608 } 609 610 @Override 611 public List<Node> getNodesByIds(Collection<Serializable> ids) { 612 checkLive(); 613 return getNodesByIds(ids, true); 614 } 615 616 @Override 617 public Node getParentNode(Node node) { 618 checkLive(); 619 if (node == null) { 620 throw new IllegalArgumentException("Illegal null node"); 621 } 622 Serializable id = node.getHierFragment().get(Model.HIER_PARENT_KEY); 623 return id == null ? null : getNodeById(id); 624 } 625 626 @Override 627 public String getPath(Node node) { 628 checkLive(); 629 String path = node.getPath(); 630 if (path == null) { 631 path = context.getPath(node.getHierFragment()); 632 } 633 return path; 634 } 635 636 /* 637 * Normalize using NFC to avoid decomposed characters (like 'e' + COMBINING ACUTE ACCENT instead of LATIN SMALL 638 * LETTER E WITH ACUTE). NFKC (normalization using compatibility decomposition) is not used, because compatibility 639 * decomposition turns some characters (LATIN SMALL LIGATURE FFI, TRADE MARK SIGN, FULLWIDTH SOLIDUS) into a series 640 * of characters ('f'+'f'+'i', 'T'+'M', '/') that cannot be re-composed into the original, and therefore loses 641 * information. 642 */ 643 protected String normalize(String path) { 644 return Normalizer.normalize(path, Normalizer.Form.NFC); 645 } 646 647 /* Does not apply to properties for now (no use case). 
     */
    @Override
    public Node getNodeByPath(String path, Node node) {
        // TODO optimize this to use a dedicated path-based table
        checkLive();
        if (path == null) {
            throw new IllegalArgumentException("Illegal null path");
        }
        path = normalize(path);
        int i;
        if (path.startsWith("/")) {
            node = getRootNode();
            if (path.equals("/")) {
                return node;
            }
            i = 1;
        } else {
            if (node == null) {
                throw new IllegalArgumentException("Illegal relative path with null node: " + path);
            }
            i = 0;
        }
        String[] names = path.split("/", -1);
        for (; i < names.length; i++) {
            String name = names[i];
            if (name.length() == 0) {
                throw new IllegalArgumentException("Illegal path with empty component: " + path);
            }
            node = getChildNode(node, name, false);
            if (node == null) {
                return null;
            }
        }
        return node;
    }

    @Override
    public boolean addMixinType(Node node, String mixin) {
        if (model.getMixinPropertyInfos(mixin) == null) {
            throw new IllegalArgumentException("No such mixin: " + mixin);
        }
        if (model.getDocumentTypeFacets(node.getPrimaryType()).contains(mixin)) {
            return false; // already present in type
        }
        List<String> list = new ArrayList<>(Arrays.asList(node.getMixinTypes()));
        if (list.contains(mixin)) {
            return false; // already present in node
        }
        Set<String> otherChildrenNames = getChildrenNames(node.getPrimaryType(), list);
        list.add(mixin);
        String[] mixins = list.toArray(new String[list.size()]);
        node.hierFragment.put(Model.MAIN_MIXIN_TYPES_KEY, mixins);
        // immediately create child nodes (for complex properties) in order
        // to avoid concurrency issues later on
        Map<String, String> childrenTypes = model.getMixinComplexChildren(mixin);
        for (Entry<String, String> es : childrenTypes.entrySet()) {
            String childName = es.getKey();
            String childType = es.getValue();
            // child may already exist if the schema is part of the primary type or another facet
            if (otherChildrenNames.contains(childName)) {
                continue;
            }
            addChildNode(node, childName, null, childType, true);
        }
        return true;
    }

    @Override
    public boolean removeMixinType(Node node, String mixin) {
        List<String> list = new ArrayList<>(Arrays.asList(node.getMixinTypes()));
        if (!list.remove(mixin)) {
            return false; // not present in node
        }
        String[] mixins = list.toArray(new String[list.size()]);
        if (mixins.length == 0) {
            mixins = null;
        }
        node.hierFragment.put(Model.MAIN_MIXIN_TYPES_KEY, mixins);
        Set<String> otherChildrenNames = getChildrenNames(node.getPrimaryType(), list);
        Map<String, String> childrenTypes = model.getMixinComplexChildren(mixin);
        for (String childName : childrenTypes.keySet()) {
            // child must be kept if the schema is part of the primary type or another facet
            if (otherChildrenNames.contains(childName)) {
                continue;
            }
            Node child = getChildNode(node, childName, true);
            removePropertyNode(child);
        }
        node.clearCache();
        return true;
    }

    @Override
    public ScrollResult<String> scroll(String query, int batchSize, int keepAliveSeconds) {
        return mapper.scroll(query, batchSize, keepAliveSeconds);
    }

    @Override
    public ScrollResult<String> scroll(String query, QueryFilter queryFilter, int batchSize, int keepAliveSeconds) {
        return mapper.scroll(query, queryFilter, batchSize, keepAliveSeconds);
    }
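
    // continues a scroll started by one of the scroll methods above, using the scroll id it returned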
    @Override
    public ScrollResult<String> scroll(String scrollId) {
        return mapper.scroll(scrollId);
    }

    /**
     * Gets complex children names defined by the primary type and the list of mixins.
     */
    protected Set<String> getChildrenNames(String primaryType, List<String> mixins) {
        Map<String, String> cc = model.getTypeComplexChildren(primaryType);
        if (cc == null) {
            cc = Collections.emptyMap();
        }
        Set<String> childrenNames = new HashSet<>(cc.keySet());
        for (String mixin : mixins) {
            cc = model.getMixinComplexChildren(mixin);
            if (cc != null) {
                childrenNames.addAll(cc.keySet());
            }
        }
        return childrenNames;
    }

    @Override
    public Node addChildNode(Node parent, String name, Long pos, String typeName, boolean complexProp) {
        if (pos == null && !complexProp && parent != null) {
            pos = context.getNextPos(parent.getId(), complexProp);
        }
        return addChildNode(null, parent, name, pos, typeName, complexProp);
    }

    @Override
    public Node addChildNode(Serializable id, Node parent, String name, Long pos, String typeName,
            boolean complexProp) {
        checkLive();
        if (name == null) {
            throw new IllegalArgumentException("Illegal null name");
        }
        name = normalize(name);
        if (name.contains("/") || name.equals(".") || name.equals("..")) {
            throw new IllegalArgumentException("Illegal name: " + name);
        }
        if (!model.isType(typeName)) {
            throw new IllegalArgumentException("Unknown type: " + typeName);
        }
        id = generateNewId(id);
        Serializable parentId = parent == null ? null : parent.hierFragment.getId();
        Node node = addNode(id, parentId, name, pos, typeName, complexProp);
        // immediately create child nodes (for complex properties) in order
        // to avoid concurrency issues later on
        Map<String, String> childrenTypes = model.getTypeComplexChildren(typeName);
        for (Entry<String, String> es : childrenTypes.entrySet()) {
            String childName = es.getKey();
            String childType = es.getValue();
            addChildNode(node, childName, null, childType, true);
        }
        return node;
    }

    protected Node addNode(Serializable id, Serializable parentId, String name, Long pos, String typeName,
            boolean complexProp) {
        requireReadAclsUpdate();
        // main info
        Row hierRow = new Row(Model.HIER_TABLE_NAME, id);
        hierRow.putNew(Model.HIER_PARENT_KEY, parentId);
        hierRow.putNew(Model.HIER_CHILD_NAME_KEY, name);
        hierRow.putNew(Model.HIER_CHILD_POS_KEY, pos);
        hierRow.putNew(Model.MAIN_PRIMARY_TYPE_KEY, typeName);
        hierRow.putNew(Model.HIER_CHILD_ISPROPERTY_KEY, Boolean.valueOf(complexProp));
        if (changeTokenEnabled) {
            hierRow.putNew(Model.MAIN_SYS_CHANGE_TOKEN_KEY, Model.INITIAL_SYS_CHANGE_TOKEN);
        }
        SimpleFragment hierFragment = context.createHierarchyFragment(hierRow);
        FragmentGroup fragmentGroup = new FragmentGroup(hierFragment, new FragmentsMap());
        return new Node(context, fragmentGroup, context.getPath(hierFragment));
    }

    @Override
    public Node addProxy(Serializable targetId, Serializable versionableId, Node parent, String name, Long pos) {
        if (!repository.getRepositoryDescriptor().getProxiesEnabled()) {
            throw new NuxeoException("Proxies are disabled by configuration");
        }
        Node proxy = addChildNode(parent, name, pos, Model.PROXY_TYPE, false);
        proxy.setSimpleProperty(Model.PROXY_TARGET_PROP, targetId);
        proxy.setSimpleProperty(Model.PROXY_VERSIONABLE_PROP, versionableId);
        if (changeTokenEnabled) {
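            // seed both the system change token and the user-visible change token on the new proxy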
            proxy.setSimpleProperty(Model.MAIN_SYS_CHANGE_TOKEN_PROP, Model.INITIAL_SYS_CHANGE_TOKEN);
            proxy.setSimpleProperty(Model.MAIN_CHANGE_TOKEN_PROP, Model.INITIAL_CHANGE_TOKEN);
        }
        SimpleFragment proxyFragment = (SimpleFragment) proxy.fragments.get(Model.PROXY_TABLE_NAME);
        context.createdProxyFragment(proxyFragment);
        return proxy;
    }

    @Override
    public void setProxyTarget(Node proxy, Serializable targetId) {
        if (!repository.getRepositoryDescriptor().getProxiesEnabled()) {
            throw new NuxeoException("Proxies are disabled by configuration");
        }
        SimpleProperty prop = proxy.getSimpleProperty(Model.PROXY_TARGET_PROP);
        Serializable oldTargetId = prop.getValue();
        if (!oldTargetId.equals(targetId)) {
            SimpleFragment proxyFragment = (SimpleFragment) proxy.fragments.get(Model.PROXY_TABLE_NAME);
            context.removedProxyTarget(proxyFragment);
            proxy.setSimpleProperty(Model.PROXY_TARGET_PROP, targetId);
            context.addedProxyTarget(proxyFragment);
        }
    }

    @Override
    public boolean hasChildNode(Node parent, String name, boolean complexProp) {
        checkLive();
        // TODO could optimize further by not fetching the fragment at all
        SimpleFragment fragment = context.getChildHierByName(parent.getId(), normalize(name), complexProp);
        return fragment != null;
    }

    @Override
    public Node getChildNode(Node parent, String name, boolean complexProp) {
        checkLive();
        if (name == null || name.contains("/") || name.equals(".") || name.equals("..")) {
            throw new IllegalArgumentException("Illegal name: " + name);
        }
        SimpleFragment fragment = context.getChildHierByName(parent.getId(), name, complexProp);
        return fragment == null ? null : getNodeById(fragment.getId());
    }

    // TODO optimize with dedicated backend call
    @Override
    public boolean hasChildren(Node parent, boolean complexProp) {
        checkLive();
        List<SimpleFragment> children = context.getChildren(parent.getId(), null, complexProp);
        if (complexProp) {
            return !children.isEmpty();
        }
        if (children.isEmpty()) {
            return false;
        }
        // we have to check that type names are not obsolete, as they wouldn't be returned
        // by getChildren and we must be consistent
        SchemaManager schemaManager = Framework.getService(SchemaManager.class);
        for (SimpleFragment simpleFragment : children) {
            String primaryType = simpleFragment.getString(Model.MAIN_PRIMARY_TYPE_KEY);
            if (primaryType.equals(Model.PROXY_TYPE)) {
                Node node = getNodeById(simpleFragment.getId(), false);
                Serializable targetId = node.getSimpleProperty(Model.PROXY_TARGET_PROP).getValue();
                if (targetId == null) {
                    // missing target, should not happen, ignore
                    continue;
                }
                Node target = getNodeById(targetId, false);
                if (target == null) {
                    continue;
                }
                primaryType = target.getPrimaryType();
            }
            DocumentType type = schemaManager.getDocumentType(primaryType);
            if (type == null) {
                // obsolete type, ignored in getChildren
                continue;
            }
            return true;
        }
        return false;
    }

    @Override
    public List<Node> getChildren(Node parent, String name, boolean complexProp) {
        checkLive();
        List<SimpleFragment> fragments = context.getChildren(parent.getId(), name, complexProp);
        List<Node> nodes = new ArrayList<>(fragments.size());
        for (SimpleFragment fragment : fragments) {
            Node node = getNodeById(fragment.getId());
            if (node == null) {
                // cannot happen
                log.error("Child node cannot be created: " + fragment.getId());
                continue;
            }
            nodes.add(node);
        }
        return nodes;
    }

    @Override
    public void orderBefore(Node parent, Node source, Node dest) {
        checkLive();
        context.orderBefore(parent.getId(), source.getId(), dest == null ? null : dest.getId());
    }

    @Override
    public Node move(Node source, Node parent, String name) {
        checkLive();
        if (!parent.getId().equals(source.getParentId())) {
            flush(); // needed when doing many moves for circular stuff
        }
        context.move(source, parent.getId(), name);
        requireReadAclsUpdate();
        return source;
    }

    @Override
    public Node copy(Node source, Node parent, String name) {
        checkLive();
        flush();
        Serializable id = context.copy(source, parent.getId(), name);
        requireReadAclsUpdate();
        return getNodeById(id);
    }

    @Override
    public void removeNode(Node node) {
        checkLive();
        flush();
        // remove the lock using the lock manager
        // TODO children locks?
        Serializable id = node.getId();
        getLockManager().removeLock(model.idToString(id), null);
        // find all descendants
        List<NodeInfo> nodeInfos = context.getNodeAndDescendantsInfo(node.getHierFragment());

        // check that there is no active retention
        Set<Serializable> retentionActiveIds = nodeInfos.stream() //
                                                        .filter(info -> info.isRetentionActive)
                                                        .map(info -> info.id)
                                                        .collect(Collectors.toSet());
        if (!retentionActiveIds.isEmpty()) {
            if (retentionActiveIds.contains(id)) {
                throw new DocumentExistsException("Cannot remove " + id + ", it is under active retention");
            } else {
                throw new DocumentExistsException("Cannot remove " + id + ", subdocument "
                        + retentionActiveIds.iterator().next() + " is under active retention");
            }
        }

        // if a proxy target is removed, check that all proxies to it are removed
        if (repository.getRepositoryDescriptor().getProxiesEnabled()) {
            Set<Serializable> removedIds = nodeInfos.stream().map(info -> info.id).collect(Collectors.toSet());
            // find proxies pointing to any removed document
            Set<Serializable> proxyIds = context.getTargetProxies(removedIds);
            for (Serializable proxyId : proxyIds) {
                if (!removedIds.contains(proxyId)) {
                    Node proxy = getNodeById(proxyId);
                    Serializable targetId = (Serializable) proxy.getSingle(Model.PROXY_TARGET_PROP);
                    throw new DocumentExistsException(
                            "Cannot remove " + id + ", subdocument " + targetId + " is the target of proxy "
                                    + proxyId);
                }
            }
        }

        // remove all nodes
        context.removeNode(node.getHierFragment(), nodeInfos);
    }

    @Override
    public void removePropertyNode(Node node) {
        checkLive();
        // no flush needed
        context.removePropertyNode(node.getHierFragment());
    }

    @Override
    public Node checkIn(Node node, String label, String checkinComment) {
        checkLive();
        flush();
        Serializable id = context.checkIn(node, label, checkinComment);
        requireReadAclsUpdate();
        // save to reflect changes immediately in database
        flush();
        return getNodeById(id);
    }

    @Override
    public void checkOut(Node node) {
        checkLive();
        context.checkOut(node);
        requireReadAclsUpdate();
    }

    @Override
    public void restore(Node node, Node version) {
        checkLive();
        // save done inside method
        context.restoreVersion(node, version);
        requireReadAclsUpdate();
    }
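
    // scans the version series and returns the first version whose label matches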
    @Override
    public Node getVersionByLabel(Serializable versionSeriesId, String label) {
        if (label == null) {
            return null;
        }
        List<Node> versions = getVersions(versionSeriesId);
        for (Node node : versions) {
            String l = (String) node.getSimpleProperty(Model.VERSION_LABEL_PROP).getValue();
            if (label.equals(l)) {
                return node;
            }
        }
        return null;
    }

    @Override
    public Node getLastVersion(Serializable versionSeriesId) {
        checkLive();
        List<Serializable> ids = context.getVersionIds(versionSeriesId);
        return ids.isEmpty() ? null : getNodeById(ids.get(ids.size() - 1));
    }

    @Override
    public List<Node> getVersions(Serializable versionSeriesId) {
        checkLive();
        List<Serializable> ids = context.getVersionIds(versionSeriesId);
        List<Node> nodes = new ArrayList<>(ids.size());
        for (Serializable id : ids) {
            nodes.add(getNodeById(id));
        }
        return nodes;
    }

    @Override
    public List<Node> getProxies(Node document, Node parent) {
        checkLive();
        if (!repository.getRepositoryDescriptor().getProxiesEnabled()) {
            return Collections.emptyList();
        }

        List<Serializable> ids;
        if (document.isVersion()) {
            ids = context.getTargetProxyIds(document.getId());
        } else {
            Serializable versionSeriesId;
            if (document.isProxy()) {
                versionSeriesId = document.getSimpleProperty(Model.PROXY_VERSIONABLE_PROP).getValue();
            } else {
                versionSeriesId = document.getId();
            }
            ids = context.getSeriesProxyIds(versionSeriesId);
        }

        List<Node> nodes = getNodes(ids);

        if (parent != null) {
            // filter by parent
            Serializable parentId = parent.getId();
            nodes.removeIf(node -> !parentId.equals(node.getParentId()));
        }

        return nodes;
    }

    protected List<Node> getNodes(List<Serializable> ids) {
        List<Node> nodes = new LinkedList<>();
        for (Serializable id : ids) {
            Node node = getNodeById(id);
            if (node != null || Boolean.TRUE.booleanValue()) { // XXX
                // null if deleted, which means selection wasn't correctly updated
                nodes.add(node);
            }
        }
        return nodes;
    }

    @Override
    public List<Node> getProxies(Node document) {
        checkLive();
        if (!repository.getRepositoryDescriptor().getProxiesEnabled()) {
            return Collections.emptyList();
        }
        List<Serializable> ids = context.getTargetProxyIds(document.getId());
        return getNodes(ids);
    }

    /**
     * Fetches the hierarchy fragment for the given rows and all their ancestors.
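     * <p>
     * The ancestor ids are resolved by the mapper in a single call, then all the fragments are fetched in bulk through
     * the context.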
1124 * 1125 * @param ids the fragment ids 1126 */ 1127 protected List<Fragment> getHierarchyAndAncestors(Collection<Serializable> ids) { 1128 Set<Serializable> allIds = mapper.getAncestorsIds(ids); 1129 allIds.addAll(ids); 1130 List<RowId> rowIds = new ArrayList<>(allIds.size()); 1131 for (Serializable id : allIds) { 1132 rowIds.add(new RowId(Model.HIER_TABLE_NAME, id)); 1133 } 1134 return context.getMulti(rowIds, true); 1135 } 1136 1137 @Override 1138 public PartialList<Serializable> query(String query, QueryFilter queryFilter, boolean countTotal) { 1139 final Timer.Context timerContext = queryTimer.time(); 1140 try { 1141 return mapper.query(query, NXQL.NXQL, queryFilter, countTotal); 1142 } finally { 1143 timerContext.stop(); 1144 } 1145 } 1146 1147 @Override 1148 public PartialList<Serializable> query(String query, String queryType, QueryFilter queryFilter, long countUpTo) { 1149 final Timer.Context timerContext = queryTimer.time(); 1150 try { 1151 return mapper.query(query, queryType, queryFilter, countUpTo); 1152 } finally { 1153 long duration = timerContext.stop(); 1154 if ((LOG_MIN_DURATION_NS >= 0) && (duration > LOG_MIN_DURATION_NS)) { 1155 String msg = String.format("duration_ms:\t%.2f\t%s %s\tquery\t%s", duration / 1000000.0, queryFilter, 1156 countUpToAsString(countUpTo), query); 1157 if (log.isTraceEnabled()) { 1158 log.info(msg, new Throwable("Slow query stack trace")); 1159 } else { 1160 log.info(msg); 1161 } 1162 } 1163 } 1164 } 1165 1166 private String countUpToAsString(long countUpTo) { 1167 if (countUpTo > 0) { 1168 return String.format("count total results up to %d", countUpTo); 1169 } 1170 return countUpTo == -1 ? "count total results UNLIMITED" : ""; 1171 } 1172 1173 @Override 1174 public IterableQueryResult queryAndFetch(String query, String queryType, QueryFilter queryFilter, 1175 Object... params) { 1176 return queryAndFetch(query, queryType, queryFilter, false, params); 1177 } 1178 1179 @Override 1180 public IterableQueryResult queryAndFetch(String query, String queryType, QueryFilter queryFilter, 1181 boolean distinctDocuments, Object... params) { 1182 final Timer.Context timerContext = queryTimer.time(); 1183 try { 1184 return mapper.queryAndFetch(query, queryType, queryFilter, distinctDocuments, params); 1185 } finally { 1186 long duration = timerContext.stop(); 1187 if ((LOG_MIN_DURATION_NS >= 0) && (duration > LOG_MIN_DURATION_NS)) { 1188 String msg = String.format("duration_ms:\t%.2f\t%s\tqueryAndFetch\t%s", duration / 1000000.0, 1189 queryFilter, query); 1190 if (log.isTraceEnabled()) { 1191 log.info(msg, new Throwable("Slow query stack trace")); 1192 } else { 1193 log.info(msg); 1194 } 1195 } 1196 } 1197 } 1198 1199 @Override 1200 public PartialList<Map<String, Serializable>> queryProjection(String query, String queryType, 1201 QueryFilter queryFilter, boolean distinctDocuments, long countUpTo, Object... 
    @Override
    public PartialList<Map<String, Serializable>> queryProjection(String query, String queryType,
            QueryFilter queryFilter, boolean distinctDocuments, long countUpTo, Object... params) {
        final Timer.Context timerContext = queryTimer.time();
        try {
            return mapper.queryProjection(query, queryType, queryFilter, distinctDocuments, countUpTo, params);
        } finally {
            long duration = timerContext.stop();
            if ((LOG_MIN_DURATION_NS >= 0) && (duration > LOG_MIN_DURATION_NS)) {
                String msg = String.format("duration_ms:\t%.2f\t%s\tqueryProjection\t%s", duration / 1000000.0,
                        queryFilter, query);
                if (log.isTraceEnabled()) {
                    log.info(msg, new Throwable("Slow query stack trace"));
                } else {
                    log.info(msg);
                }
            }
        }
    }

    @Override
    public LockManager getLockManager() {
        return repository.getLockManager();
    }

    @Override
    public void requireReadAclsUpdate() {
        readAclsChanged = true;
    }

    @Override
    public void updateReadAcls() {
        final Timer.Context timerContext = aclrUpdateTimer.time();
        try {
            mapper.updateReadAcls();
            readAclsChanged = false;
        } finally {
            timerContext.stop();
        }
    }

    @Override
    public void rebuildReadAcls() {
        mapper.rebuildReadAcls();
        readAclsChanged = false;
    }

    private void computeRootNode() {
        String repositoryId = repository.getName();
        Serializable rootId = mapper.getRootId(repositoryId);
        if (rootId == null && COMPAT_REPOSITORY_NAME) {
            // compat, old repositories had fixed id "default"
            rootId = mapper.getRootId("default");
        }
        if (rootId == null) {
            log.debug("Creating root");
            addRootNode();
            save();
            // record information about the root id
            mapper.setRootId(repositoryId, rootNodeId);
        } else {
            rootNodeId = rootId;
        }
    }

    // TODO factor with addChildNode
    private Node addRootNode() {
        rootNodeId = generateNewId(null);
        Node rootNode = addNode(rootNodeId, null, "", null, Model.ROOT_TYPE, false);
        addRootACP(rootNode);
        return rootNode;
    }

    private void addRootACP(Node rootNode) {
        ACLRow[] aclrows = new ACLRow[3];
        // TODO put groups in their proper place. like that now for consistency.
        aclrows[0] = new ACLRow(0, ACL.LOCAL_ACL, true, SecurityConstants.EVERYTHING, SecurityConstants.ADMINISTRATORS,
                null);
        aclrows[1] = new ACLRow(1, ACL.LOCAL_ACL, true, SecurityConstants.EVERYTHING, SecurityConstants.ADMINISTRATOR,
                null);
        aclrows[2] = new ACLRow(2, ACL.LOCAL_ACL, true, SecurityConstants.READ, SecurityConstants.MEMBERS, null);
        rootNode.setCollectionProperty(Model.ACL_PROP, aclrows);
        requireReadAclsUpdate();
    }

    // public Node newNodeInstance() needed ?

    public void checkPermission(String absPath, String actions) {
        checkLive();
        // TODO Auto-generated method stub
        throw new RuntimeException("Not implemented");
    }

    public boolean hasPendingChanges() {
        checkLive();
        // TODO Auto-generated method stub
        throw new RuntimeException("Not implemented");
    }

    public void markReferencedBinaries() {
        checkLive();
        mapper.markReferencedBinaries();
    }

    public int cleanupDeletedDocuments(int max, Calendar beforeTime) {
        checkLive();
        if (!repository.getRepositoryDescriptor().getSoftDeleteEnabled()) {
            return 0;
        }
        return mapper.cleanupDeletedRows(max, beforeTime);
    }

    /*
     * ----- XAResource -----
     */

    @Override
    public boolean isSameRM(XAResource xaresource) {
        return xaresource == this;
    }

    @Override
    public void start(Xid xid, int flags) throws XAException {
        if (flags == TMNOFLAGS) {
            try {
                processReceivedInvalidations();
            } catch (NuxeoException e) {
                log.error("Could not start transaction", e);
                throw (XAException) new XAException(XAException.XAER_RMERR).initCause(e);
            }
        }
        mapper.start(xid, flags);
        inTransaction = true;
        checkThreadStart();
    }

    @Override
    public void end(Xid xid, int flags) throws XAException {
        boolean failed = true;
        try {
            if (flags != TMFAIL) {
                try {
                    flush();
                } catch (ConcurrentUpdateException e) {
                    TransactionHelper.noteSuppressedException(e);
                    log.debug("Exception during transaction commit", e);
                    // set rollback only manually instead of throwing, this avoids
                    // a spurious log in Geronimo TransactionImpl and has the same effect
                    TransactionHelper.setTransactionRollbackOnly();
                    return;
                } catch (NuxeoException e) {
                    log.error("Exception during transaction commit", e);
                    throw (XAException) new XAException(XAException.XAER_RMERR).initCause(e);
                }
            }
            failed = false;
            mapper.end(xid, flags);
        } finally {
            if (failed) {
                mapper.end(xid, TMFAIL);
                // rollback done by tx manager
            }
        }
    }

    @Override
    public int prepare(Xid xid) throws XAException {
        int res = mapper.prepare(xid);
        if (res == XA_RDONLY) {
            // Read-only optimization, commit() won't be called by the TM.
            // It's important to nevertheless send invalidations because
            // Oracle, in tightly-coupled transaction mode, can return
            // this status even when some changes were actually made
            // (they just will be committed by another resource).
            // See NXP-7943
            commitDone();
        }
        return res;
    }

    @Override
    public void commit(Xid xid, boolean onePhase) throws XAException {
        try {
            mapper.commit(xid, onePhase);
        } finally {
            commitDone();
        }
    }

    protected void commitDone() throws XAException {
        inTransaction = false;
        try {
            try {
                sendInvalidationsToOthers();
            } finally {
                checkThreadEnd();
            }
        } catch (NuxeoException e) {
            log.error("Could not send invalidations", e);
            throw (XAException) new XAException(XAException.XAER_RMERR).initCause(e);
        }
    }

    @Override
    public void rollback(Xid xid) throws XAException {
        try {
            try {
                mapper.rollback(xid);
            } finally {
                rollback();
            }
        } finally {
            inTransaction = false;
            // no invalidations to send
            checkThreadEnd();
        }
    }

    @Override
    public void forget(Xid xid) throws XAException {
        mapper.forget(xid);
    }

    @Override
    public Xid[] recover(int flag) throws XAException {
        return mapper.recover(flag);
    }

    @Override
    public boolean setTransactionTimeout(int seconds) throws XAException {
        return mapper.setTransactionTimeout(seconds);
    }

    @Override
    public int getTransactionTimeout() throws XAException {
        return mapper.getTransactionTimeout();
    }

    public long getCacheSize() {
        return context.getCacheSize();
    }

    public long getCacheMapperSize() {
        return context.getCacheMapperSize();
    }

    public long getCachePristineSize() {
        return context.getCachePristineSize();
    }

    public long getCacheSelectionSize() {
        return context.getCacheSelectionSize();
    }

    @Override
    public Map<String, String> getBinaryFulltext(Serializable id) {
        if (repository.getRepositoryDescriptor().getFulltextDescriptor().getFulltextDisabled()) {
            return null;
        }
        RowId rowId = new RowId(Model.FULLTEXT_TABLE_NAME, id);
        return mapper.getBinaryFulltext(rowId);
    }

    @Override
    public boolean isChangeTokenEnabled() {
        return changeTokenEnabled;
    }

    @Override
    public void markUserChange(Serializable id) {
        context.markUserChange(id);
    }

}