/**
 * Prepares and executes a single index operation on the supplied {@code engine}.
 * <p>
 * If document parsing produces a dynamic mapping update, the operation is NOT executed;
 * instead an {@link Engine.IndexResult} carrying the required mapping update is returned
 * so the caller can apply the mapping first and retry.
 *
 * @param engine the engine to index into (passed explicitly rather than via getEngine())
 * @return the engine-level result: success, a required mapping update, or a per-document failure
 * @throws IOException if the engine-level index call fails with an I/O error
 */
private Engine.IndexResult applyIndexOperation(Engine engine, long seqNo, long opPrimaryTerm, long version,
                                               VersionType versionType, long ifSeqNo, long ifPrimaryTerm,
                                               long autoGeneratedTimeStamp, boolean isRetry,
                                               Engine.Operation.Origin origin,
                                               SourceToParse sourceToParse) throws IOException {
    // An operation's term can never exceed the shard's current primary term.
    assert opPrimaryTerm <= this.operationPrimaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.operationPrimaryTerm + "]";
    assert versionType.validateVersionForWrites(version);
    ensureWriteAllowed(origin);
    Engine.Index operation;
    try {
        operation = prepareIndex(docMapper(sourceToParse.type()), indexSettings.getIndexVersionCreated(), sourceToParse,
            seqNo, opPrimaryTerm, version, versionType, origin, autoGeneratedTimeStamp, isRetry, ifSeqNo, ifPrimaryTerm);
        Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
        if (update != null) {
            // Parsing introduced new fields: report the mapping update instead of indexing.
            return new Engine.IndexResult(update);
        }
    } catch (Exception e) {
        // We treat any exception during parsing and or mapping update as a document level failure
        // with the exception side effects of closing the shard. Since we don't have the shard, we
        // can not raise an exception that may block any replication of previous operations to the
        // replicas
        verifyNotClosed(e);
        return new Engine.IndexResult(e, version, opPrimaryTerm, seqNo);
    }
    // Deliberately outside the try: failures from the actual engine write must not be
    // converted into per-document results here.
    return index(engine, operation);
}
private Engine.IndexResult applyIndexOperation(long seqNo, long opPrimaryTerm, long version, VersionType versionType, long autoGeneratedTimeStamp, boolean isRetry, Engine.Operation.Origin origin, SourceToParse sourceToParse) throws IOException { assert opPrimaryTerm <= this.operationPrimaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.operationPrimaryTerm + "]"; assert versionType.validateVersionForWrites(version); ensureWriteAllowed(origin); Engine.Index operation; try { operation = prepareIndex(docMapper(sourceToParse.type()), indexSettings.getIndexVersionCreated(), sourceToParse, seqNo, opPrimaryTerm, version, versionType, origin, autoGeneratedTimeStamp, isRetry); Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); if (update != null) { return new Engine.IndexResult(update); } } catch (Exception e) { // We treat any exception during parsing and or mapping update as a document level failure // with the exception side effects of closing the shard. Since we don't have the shard, we // can not raise an exception that may block any replication of previous operations to the // replicas verifyNotClosed(e); return new Engine.IndexResult(e, version, opPrimaryTerm, seqNo); } return index(getEngine(), operation); }
/**
 * Replays a single translog operation (index / delete / no-op) against the given engine,
 * recording any dynamic mapping update required by an index operation.
 *
 * @param engine    the engine to apply the operation to
 * @param operation the recovered translog operation
 * @throws IOException           if the engine call fails with an I/O error
 * @throws IllegalStateException if the operation type is unknown
 */
private void applyOperation(Engine engine, Engine.Operation operation) throws IOException {
    switch (operation.operationType()) {
        case INDEX:
            Engine.Index engineIndex = (Engine.Index) operation;
            Mapping update = engineIndex.parsedDoc().dynamicMappingsUpdate();
            // Fix: reuse the captured value instead of calling dynamicMappingsUpdate()
            // a second time for the null check (original re-invoked the accessor).
            if (update != null) {
                // Accumulate mapping updates per type; merge when one was already recorded.
                recoveredTypes.compute(engineIndex.type(),
                    (k, mapping) -> mapping == null ? update : mapping.merge(update, false));
            }
            engine.index(engineIndex);
            break;
        case DELETE:
            engine.delete((Engine.Delete) operation);
            break;
        case NO_OP:
            engine.noOp((Engine.NoOp) operation);
            break;
        default:
            throw new IllegalStateException("No operation defined for [" + operation + "]");
    }
}
/**
 * Parses a raw document source into a {@link ParsedDocument}, auto-creating the document
 * mapper for {@code type} if needed, and synchronously pushes any resulting dynamic
 * mapping update to the master before returning.
 *
 * @param index the index name used for parsing context and the mapping update
 * @param type  the document type
 * @param doc   the raw document source
 * @return the parsed document (flyweight parse — presumably not meant to be indexed as-is; TODO confirm)
 * @throws Throwable propagated from parsing or the synchronous master mapping update
 */
private ParsedDocument parseDocument(String index, String type, BytesReference doc) throws Throwable {
    MapperService mapperService = indexShard.mapperService();
    // TODO: make parsing not dynamically create fields not in the original mapping
    DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type);
    ParsedDocument parsedDocument = docMapper.getDocumentMapper().parse(source(doc).index(index).type(type).flyweight(true));
    if (docMapper.getMapping() != null) {
        // The mapper itself was just auto-created: its mapping is part of the update to publish.
        parsedDocument.addDynamicMappingsUpdate(docMapper.getMapping());
    }
    if (parsedDocument.dynamicMappingsUpdate() != null) {
        // Blocking call: the mapping must exist on the master before callers proceed.
        mappingUpdatedAction.updateMappingOnMasterSynchronously(index, type, parsedDocument.dynamicMappingsUpdate());
    }
    return parsedDocument;
}
// NOTE(review): incomplete, non-compiling fragment — this looks like two overlapping diff
// hunks merged together (statements follow an unconditional return, braces are unbalanced,
// and the trailing throw is cut off mid-argument-list). Preserved verbatim; reconstruct
// from the original primary-execution method (cf. the RetryOnPrimaryException flow) before use.
return new Engine.IndexResult(e, request.version());
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = primary.shardId();
if (update != null) {
return new Engine.IndexResult(e, request.version());
update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
throw new ReplicationOperation.RetryOnPrimaryException(shardId,
// NOTE(review): incomplete fragment cut mid-method — the body of the if is missing.
// Appears to gate handling of a dynamic mapping update on the index-level dynamic-mapping
// setting; verify against the enclosing method before reuse.
final Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final boolean dynamicMappingEnable = indexService.mapperService().dynamic();
if (update != null && dynamicMappingEnable) {
/**
 * Prepares and executes a single index operation on this shard's current engine.
 * <p>
 * If parsing the document produces a dynamic mapping update, the operation is NOT
 * executed; an {@link Engine.IndexResult} carrying the required update is returned so
 * the caller can apply the mapping first and retry.
 * NOTE(review): this is byte-identical to another applyIndexOperation variant in this
 * file — presumably two snapshots of the same method; confirm which one is current.
 *
 * @return the engine result: success, a required mapping update, or a per-document failure
 * @throws IOException if the engine-level index call fails with an I/O error
 */
private Engine.IndexResult applyIndexOperation(long seqNo, long opPrimaryTerm, long version, VersionType versionType,
                                               long autoGeneratedTimeStamp, boolean isRetry,
                                               Engine.Operation.Origin origin,
                                               SourceToParse sourceToParse) throws IOException {
    // An operation's term can never exceed the shard's current primary term.
    assert opPrimaryTerm <= this.operationPrimaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.operationPrimaryTerm + "]";
    assert versionType.validateVersionForWrites(version);
    ensureWriteAllowed(origin);
    Engine.Index operation;
    try {
        operation = prepareIndex(docMapper(sourceToParse.type()), indexSettings.getIndexVersionCreated(), sourceToParse,
            seqNo, opPrimaryTerm, version, versionType, origin, autoGeneratedTimeStamp, isRetry);
        Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
        if (update != null) {
            // Parsing introduced new fields: report the mapping update instead of indexing.
            return new Engine.IndexResult(update);
        }
    } catch (Exception e) {
        // We treat any exception during parsing and or mapping update as a document level failure
        // with the exception side effects of closing the shard. Since we don't have the shard, we
        // can not raise an exception that may block any replication of previous operations to the
        // replicas
        verifyNotClosed(e);
        return new Engine.IndexResult(e, version, opPrimaryTerm, seqNo);
    }
    // Outside the try: engine-write failures must not be turned into document-level results.
    return index(getEngine(), operation);
}
/**
 * Execute the given {@link IndexRequest} on a primary shard, throwing a
 * {@link RetryOnPrimaryException} if the operation needs to be re-tried.
 */
public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard,
                                                                      MappingUpdatedAction mappingUpdatedAction) throws Throwable {
    Engine.IndexingOperation operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
    Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
    final ShardId shardId = indexShard.shardId();
    if (update != null) {
        // Dynamic mapping update required: publish it to the master synchronously, then
        // re-prepare the operation against the (hopefully) updated local mapping.
        final String indexName = shardId.getIndex();
        mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
        operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
        update = operation.parsedDoc().dynamicMappingsUpdate();
        if (update != null) {
            // The mapping update has not reached this node yet — caller must retry.
            throw new RetryOnPrimaryException(shardId,
                "Dynamic mappings are not available on the node that holds the primary yet");
        }
    }
    final boolean created = operation.execute(indexShard);
    // update the version on request so it will happen on the replicas
    final long version = operation.version();
    request.version(version);
    request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
    assert request.versionType().validateVersionForWrites(request.version());
    return new WriteResult<>(new IndexResponse(shardId.getIndex(), request.type(), request.id(), request.version(), created),
        operation.getTranslogLocation());
}
// NOTE(review): this closing brace appears to end the enclosing class — this snippet was
// cut at the end of that class; confirm against the full file.
}
// NOTE(review): incomplete fragment cut mid-method — the closing brace of the if (and
// whatever follows) is missing. Attaches an auto-created mapper's mapping to the parsed
// doc, then synchronously publishes any dynamic mapping update to the master.
doc.addDynamicMappingsUpdate(docMapper.getMapping());
if (doc.dynamicMappingsUpdate() != null) {
mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), request.documentType(), doc.dynamicMappingsUpdate());
// NOTE(review): incomplete fragment cut at both ends — starts mid-argument-list of a
// prepare call and is missing the trace-log closing brace. Part of translog recovery of
// an [index] op: records any dynamic mapping update via maybeAddMappingUpdate.
.routing(index.routing()).parent(index.parent()).timestamp(index.timestamp()).ttl(index.ttl()),
index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, index.getAutoGeneratedIdTimestamp(), true);
maybeAddMappingUpdate(engineIndex.type(), engineIndex.parsedDoc().dynamicMappingsUpdate(), engineIndex.id(), allowMappingUpdates);
if (logger.isTraceEnabled()) {
logger.trace("[translog] recover [index] op of [{}][{}]", index.type(), index.id());
/**
 * Execute the given {@link IndexRequest} on a replica shard, throwing a
 * {@link RetryOnReplicaException} if the operation needs to be re-tried.
 */
public static Engine.IndexResult executeIndexRequestOnReplica(IndexRequest request, IndexShard replica) throws IOException {
    final ShardId replicaShardId = replica.shardId();
    // Build the source-to-parse descriptor from the request, marked with REPLICA origin.
    final SourceToParse source =
        SourceToParse.source(SourceToParse.Origin.REPLICA, replicaShardId.getIndexName(), request.type(), request.id(),
                request.source(), request.getContentType())
            .routing(request.routing())
            .parent(request.parent())
            .timestamp(request.timestamp())
            .ttl(request.ttl());
    final Engine.Index indexOp;
    try {
        indexOp = replica.prepareIndexOnReplica(source, request.version(), request.versionType(),
            request.getAutoGeneratedTimestamp(), request.isRetry());
    } catch (MapperParsingException e) {
        // A parse failure is a document-level failure, not a replication failure.
        return new Engine.IndexResult(e, request.version());
    }
    final Mapping requiredUpdate = indexOp.parsedDoc().dynamicMappingsUpdate();
    if (requiredUpdate != null) {
        // The primary's mapping change has not propagated to this replica yet — retry later.
        throw new RetryOnReplicaException(replicaShardId,
            "Mappings are not available on the replica yet, triggered update: " + requiredUpdate);
    }
    return replica.index(indexOp);
}
/**
 * Execute the given {@link IndexRequest} on a replica shard, throwing a
 * {@link RetryOnReplicaException} if the operation needs to be re-tried.
 */
public static Engine.IndexingOperation executeIndexRequestOnReplica(IndexRequest request, IndexShard indexShard) {
    final ShardId shardId = indexShard.shardId();
    SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).index(shardId.getIndex()).type(request.type()).id(request.id())
        .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
    final Engine.IndexingOperation operation;
    // INDEX and CREATE are prepared differently; CREATE additionally carries the
    // auto-generated-id flag.
    if (request.opType() == IndexRequest.OpType.INDEX) {
        operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType(), request.canHaveDuplicates());
    } else {
        assert request.opType() == IndexRequest.OpType.CREATE : request.opType();
        operation = indexShard.prepareCreateOnReplica(sourceToParse, request.version(), request.versionType(), request.canHaveDuplicates(), request.autoGeneratedId());
    }
    Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
    if (update != null) {
        // The primary's mapping change has not reached this replica yet — caller retries.
        throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
    }
    operation.execute(indexShard);
    return operation;
}
// NOTE(review): incomplete fragment — two translog-recovery excerpts ([create] and [index])
// spliced together, each cut mid-argument-list at the start and missing closing braces.
// Both record dynamic mapping updates via maybeAddMappingUpdate during RECOVERY replay.
.routing(create.routing()).parent(create.parent()).timestamp(create.timestamp()).ttl(create.ttl()),
create.version(), create.versionType().versionTypeForReplicationAndRecovery(), Engine.Operation.Origin.RECOVERY, true, false);
maybeAddMappingUpdate(engineCreate.type(), engineCreate.parsedDoc().dynamicMappingsUpdate(), engineCreate.id(), allowMappingUpdates);
if (logger.isTraceEnabled()) {
logger.trace("[translog] recover [create] op of [{}][{}]", create.type(), create.id());
.routing(index.routing()).parent(index.parent()).timestamp(index.timestamp()).ttl(index.ttl()),
index.version(), index.versionType().versionTypeForReplicationAndRecovery(), Engine.Operation.Origin.RECOVERY, true);
maybeAddMappingUpdate(engineIndex.type(), engineIndex.parsedDoc().dynamicMappingsUpdate(), engineIndex.id(), allowMappingUpdates);
if (logger.isTraceEnabled()) {
logger.trace("[translog] recover [index] op of [{}][{}]", index.type(), index.id());