/**
 * Initializes realtime segment metadata from the given ZK record.
 *
 * <p>Reads the segment status, the flush-threshold size (defaulting to -1 when absent),
 * and the flush-threshold time (only kept when present and not the {@code NULL} sentinel).
 *
 * @param znRecord the ZK record backing this segment's metadata
 */
public RealtimeSegmentZKMetadata(ZNRecord znRecord) {
  super(znRecord);
  setSegmentType(SegmentType.REALTIME);
  _status = Status.valueOf(znRecord.getSimpleField(CommonConstants.Segment.Realtime.STATUS));
  _sizeThresholdToFlushSegment = znRecord.getIntField(CommonConstants.Segment.FLUSH_THRESHOLD_SIZE, -1);
  String flushThresholdTime = znRecord.getSimpleField(CommonConstants.Segment.FLUSH_THRESHOLD_TIME);
  if (flushThresholdTime != null && !flushThresholdTime.equals(NULL)) {
    // Reuse the value already fetched above; the original issued a redundant
    // second getSimpleField() lookup for the same field.
    _timeThresholdToFlushSegment = flushThresholdTime;
  }
}
/**
 * Returns the maximum number of tasks allowed to run concurrently on this instance.
 *
 * @return the configured task cap, or {@code MAX_CONCURRENT_TASK_NOT_SET} when absent
 */
public int getMaxConcurrentTask() {
  String key = InstanceConfigProperty.MAX_CONCURRENT_TASK.name();
  return _record.getIntField(key, MAX_CONCURRENT_TASK_NOT_SET);
}
/**
 * Returns how long this message may execute before its execution is stopped.
 *
 * @return the timeout in milliseconds, or -1 when no timeout is configured
 */
public int getExecutionTimeout() {
  final int noTimeout = -1;
  return _record.getIntField(Attributes.TIMEOUT.toString(), noTimeout);
}
/**
 * Returns the maximum task count this instance is allowed to run at once.
 *
 * @return the per-instance task limit, or {@code MAX_CONCURRENT_TASK_NOT_SET} if unset
 */
public int getMaxConcurrentTask() {
  String fieldName = InstanceConfigProperty.MAX_CONCURRENT_TASK.name();
  return _record.getIntField(fieldName, MAX_CONCURRENT_TASK_NOT_SET);
}
/**
 * Gets the maximum number of offline instances allowed for the cluster.
 *
 * @return the configured offline-instance limit, or -1 when not set
 */
public int getMaxOfflineInstancesAllowed() {
  return _record.getIntField(ClusterConfigProperty.MAX_OFFLINE_INSTANCES_ALLOWED.name(), -1);
}
/**
 * Gets the minimal number of active partitions (replicas) for this resource.
 *
 * @return the minimum active replica count, or -1 when not configured
 */
public int getMinActiveReplicas() {
  return _record.getIntField(IdealStateProperty.MIN_ACTIVE_REPLICAS.toString(), -1);
}
/**
 * Returns the time budget for executing this message.
 *
 * @return the timeout in ms; -1 means execution never times out
 */
public int getExecutionTimeout() {
  String field = Attributes.TIMEOUT.toString();
  return _record.getIntField(field, -1);
}
/**
 * Gets the max offline instances allowed for the cluster.
 *
 * @return the limit on offline instances, or -1 if the field is absent
 */
public int getMaxOfflineInstancesAllowed() {
  return _record.getIntField(ClusterConfigProperty.MAX_OFFLINE_INSTANCES_ALLOWED.name(), -1);
}
/**
 * Returns how many partitions this resource has.
 *
 * @return the partition count, defaulting to 0 when unset
 */
public int getNumPartitions() {
  String key = ResourceConfigProperty.NUM_PARTITIONS.name();
  return _record.getIntField(key, 0);
}
/**
 * Gets the maximum number of partitions an instance can serve in this cluster.
 *
 * <p>NOTE(review): the stored default is -1, not Integer.MAX_VALUE as the previous
 * javadoc claimed — callers should treat -1 as "not configured / no limit".
 *
 * @return the per-instance partition capacity for this cluster, or -1 when not set
 */
public int getMaxPartitionsPerInstance() {
  return _record.getIntField(ClusterConfigProperty.MAX_PARTITIONS_PER_INSTANCE.name(), -1);
}
/**
 * Returns the maximum number of tasks allowed to run concurrently on each instance
 * in this cluster.
 *
 * @return the configured cap, or {@code DEFAULT_MAX_CONCURRENT_TASK_PER_INSTANCE} when unset
 */
public int getMaxConcurrentTaskPerInstance() {
  String key = ClusterConfigProperty.MAX_CONCURRENT_TASK_PER_INSTANCE.name();
  return _record.getIntField(key, DEFAULT_MAX_CONCURRENT_TASK_PER_INSTANCE);
}
/**
 * Returns the cluster-wide per-instance limit on concurrently running tasks.
 *
 * @return the maximum task count per instance; falls back to
 *         {@code DEFAULT_MAX_CONCURRENT_TASK_PER_INSTANCE} if unconfigured
 */
public int getMaxConcurrentTaskPerInstance() {
  String fieldName = ClusterConfigProperty.MAX_CONCURRENT_TASK_PER_INSTANCE.name();
  return _record.getIntField(fieldName, DEFAULT_MAX_CONCURRENT_TASK_PER_INSTANCE);
}
/**
 * Gets the number of minimal active partitions for this resource.
 *
 * @return the minimum active replica count, or -1 if the field is absent
 */
public int getMinActiveReplicas() {
  return _record.getIntField(IdealStateProperty.MIN_ACTIVE_REPLICAS.toString(), -1);
}
/**
 * Returns the threshold on the number of partitions that need recovery or are in
 * error. When that count exceeds this threshold, recovery balance takes precedence
 * and load balance is skipped for the current pipeline cycle. The default is set
 * at Integer.MAX_VALUE so both rebalances normally happen in the same cycle.
 *
 * @return the threshold
 */
public int getErrorOrRecoveryPartitionThresholdForLoadBalance() {
  String key = ClusterConfigProperty.ERROR_OR_RECOVERY_PARTITION_THRESHOLD_FOR_LOAD_BALANCE.name();
  return _record.getIntField(key, DEFAULT_ERROR_OR_RECOVERY_PARTITION_THRESHOLD_FOR_LOAD_BALANCE);
}
/**
 * Returns the failure threshold for this workflow. Only generic workflows honor
 * this setting; a JobQueue ignores it.
 *
 * @return the failure threshold, or {@code DEFAULT_FAILURE_THRESHOLD} when unset
 */
public int getFailureThreshold() {
  String key = WorkflowConfigProperty.FailureThreshold.name();
  return _record.getIntField(key, DEFAULT_FAILURE_THRESHOLD);
}
/**
 * Returns how many jobs this workflow can accept before rejecting further jobs.
 * This field is only consulted when the workflow is not terminable.
 *
 * @return the queue capacity, or {@code DEFAULT_CAPACITY} when unset
 */
public int getCapacity() {
  String key = WorkflowConfigProperty.capacity.name();
  return _record.getIntField(key, DEFAULT_CAPACITY);
}
/**
 * Gets the maximum number of partitions an instance can serve in this cluster.
 *
 * <p>NOTE(review): the default actually returned is -1, contradicting the earlier
 * javadoc claim of Integer.MAX_VALUE; treat -1 as "not configured / no limit".
 *
 * @return the partition capacity of an instance for this cluster, or -1 when not set
 */
public int getMaxPartitionsPerInstance() {
  return _record.getIntField(ClusterConfigProperty.MAX_PARTITIONS_PER_INSTANCE.name(), -1);
}
/**
 * Returns the error-or-recovery partition threshold for load balance. If more
 * partitions than this need recovery (or are in error), recovery balance wins and
 * load balance does not run in the same pipeline cycle; the Integer.MAX_VALUE
 * default lets both run together under normal conditions.
 *
 * @return the threshold
 */
public int getErrorOrRecoveryPartitionThresholdForLoadBalance() {
  String fieldName =
      ClusterConfigProperty.ERROR_OR_RECOVERY_PARTITION_THRESHOLD_FOR_LOAD_BALANCE.name();
  return _record.getIntField(fieldName, DEFAULT_ERROR_OR_RECOVERY_PARTITION_THRESHOLD_FOR_LOAD_BALANCE);
}
/**
 * Looks up the weight for the given partition of a resource. A per-partition
 * override stored in the resource's map field wins; otherwise the resource-level
 * weight field (or the default weight) is returned.
 *
 * @param resource  the resource name
 * @param partition the partition name
 * @return the partition's weight
 */
int getWeight(String resource, String partition) {
  Map<String, String> overrides = _record.getMapField(resource);
  if (overrides == null || !overrides.containsKey(partition)) {
    return _record.getIntField(getWeightKey(resource), getDefaultWeight());
  }
  return Integer.parseInt(overrides.get(partition));
}
/**
 * Resolves the weight of one partition of a resource: a partition-specific entry
 * in the resource's map field takes precedence over the resource-wide weight
 * field, which itself falls back to the default weight.
 *
 * @param resource  the resource name
 * @param partition the partition name
 * @return the resolved weight
 */
int getWeight(String resource, String partition) {
  Map<String, String> perPartition = _record.getMapField(resource);
  if (perPartition != null) {
    String override = perPartition.get(partition);
    if (override != null) {
      return Integer.parseInt(override);
    }
  }
  return _record.getIntField(getWeightKey(resource), getDefaultWeight());
}