/** Produces a fresh, empty {@link MapWritable} for the record reader to fill. */
@Override
public MapWritable createValue() {
    return new MapWritable();
}
/**
 * Produces an empty map value for the reader; a {@link LinkedMapWritable} is
 * used when insertion order must be preserved, otherwise a plain
 * {@link MapWritable}.
 */
@Override
public Map<Writable, Writable> createValue() {
    if (useLinkedMapWritable) {
        return new LinkedMapWritable();
    }
    return new MapWritable();
}
/**
 * Wraps a raw JSON {@link Text} blob into the {@link Writable} shape expected
 * for the configured structural type (currently only STRUCT is supported).
 */
private Writable wrapJsonData(Writable blob) {
    Assert.isTrue(blob instanceof Text, "Property `es.output.json` is enabled, but returned data was not of type Text...");
    switch (structTypeInfo.getCategory()) {
        case STRUCT:
            // Single-field struct: the entire JSON document sits under the json field name.
            Map<Writable, Writable> wrapped = new MapWritable();
            wrapped.put(jsonFieldName, blob);
            return (Writable) wrapped;
        default:
            throw new EsHadoopIllegalStateException("Could not correctly wrap JSON data for structural type " + structTypeInfo.getCategory());
    }
}
/**
 * Reads a map off the underlying input stream into {@code mw}, allocating a
 * new {@link MapWritable} when the caller passes {@code null}.
 *
 * @param mw destination map, or {@code null} to create one
 * @return the populated map
 * @throws IOException if the stream cannot be read
 */
public MapWritable readMap(MapWritable mw) throws IOException {
    MapWritable target = (mw == null ? new MapWritable() : mw);
    // The header carries the number of key/value pairs that follow.
    int entries = in.readMapHeader();
    for (int remaining = entries; remaining > 0; remaining--) {
        Writable key = read();
        Writable value = read();
        target.put(key, value);
    }
    return target;
}
/** Extraction with a dotted path ("foo.bar") must descend into nested maps. */
@Test
public void testMapFieldExtractorNested() throws Exception {
    ConstantFieldExtractor extractor = new MapWritableFieldExtractor();
    MapWritable inner = new MapWritable();
    inner.put(new Text("bar"), new Text("found"));
    Map<Writable, Writable> outer = new MapWritable();
    outer.put(new Text("foo"), inner);
    assertEquals(new Text("found"), extract(extractor, "foo.bar", outer));
}
}
/** A plain (non-dotted) field name resolves directly against the top-level map. */
@Test
public void testMapWritableFieldExtractorTopLevel() throws Exception {
    ConstantFieldExtractor extractor = new MapWritableFieldExtractor();
    Map<Writable, Writable> doc = new MapWritable();
    doc.put(new Text("key"), new Text("value"));
    assertEquals(new Text("value"), extract(extractor, "key", doc));
}
MapWritable map = new MapWritable(); Set<Object> set = (Set) object; for (Object obj : set) { MapWritable result = new MapWritable(); Map<Object, Object> map = (Map) object; for (Map.Entry<?, ?> entry : map.entrySet()) {
/** Looking up a key that is absent from the map must yield NOT_FOUND. */
@Test
public void testMapWritableFieldExtractorNestedNotFound() throws Exception {
    ConstantFieldExtractor extractor = new MapWritableFieldExtractor();
    Map<Writable, Writable> emptyDoc = new MapWritable();
    assertEquals(FieldExtractor.NOT_FOUND, extract(extractor, "key", emptyDoc));
}
/**
 * Returns the metadata map, whether it was set explicitly or read in
 * {@code readFields(DataInput)}. When the CrawlDatum was freshly created the
 * map is lazily instantiated here, so callers always get a non-null map.
 */
public org.apache.hadoop.io.MapWritable getMetaData() {
    if (this.metaData == null) {
        this.metaData = new org.apache.hadoop.io.MapWritable();
    }
    return this.metaData;
}
public void clear() { leaderId = ""; partition = 0; beginOffset = 0; offset = 0; checksum = 0; topic = ""; time = 0; server = ""; service = ""; partitionMap = new MapWritable(); }
/**
 * Copy constructor. Copies every serialized field from {@code other} and takes
 * a defensive copy of the partition map so later mutations do not leak between
 * instances.
 */
public EtlKey(EtlKey other) {
    // Bug fix: leaderId was previously not copied, so copies silently lost the
    // leader id even though it is part of the serialized state (see clear()/readFields()).
    this.leaderId = other.leaderId;
    this.partition = other.partition;
    this.beginOffset = other.beginOffset;
    this.offset = other.offset;
    this.checksum = other.checksum;
    this.topic = other.topic;
    this.time = other.time;
    this.server = other.server;
    this.service = other.service;
    // Deep copy: MapWritable(MapWritable) clones the backing map.
    this.partitionMap = new MapWritable(other.partitionMap);
}
// Verifies the failure-event fields rendered by SerializationEventConverter for a
// Hive map-typed record: raw event text, a parseable timestamp, the normalized
// exception type/message, and the fixed bulk-entry error message.
@Test
public void generateEventHiveRecordLimited() throws Exception {
    Map<Writable, Writable> map = new MapWritable();
    map.put(new Text("one"), new IntWritable(1));
    map.put(new Text("two"), new IntWritable(2));
    map.put(new Text("three"), new IntWritable(3));
    // Wrap the writable map in a HiveType carrying a map<string,int> inspector.
    HiveType tuple = new HiveType(map, TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(
            TypeInfoFactory.getMapTypeInfo(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.intTypeInfo)));
    SerializationEventConverter eventConverter = new SerializationEventConverter();
    SerializationFailure iaeFailure = new SerializationFailure(new IllegalArgumentException("garbage"), tuple, new ArrayList<String>());
    // MapWritable does not override toString(), so only the identity prefix is stable.
    String rawEvent = eventConverter.getRawEvent(iaeFailure);
    assertThat(rawEvent, startsWith("HiveType{object=org.apache.hadoop.io.MapWritable@"));
    // Timestamp must be non-empty and parse to a post-epoch instant.
    String timestamp = eventConverter.getTimestamp(iaeFailure);
    assertTrue(StringUtils.hasText(timestamp));
    assertTrue(DateUtils.parseDate(timestamp).getTime().getTime() > 1L);
    // Exception class name is rendered in lower snake case.
    String exceptionType = eventConverter.renderExceptionType(iaeFailure);
    assertEquals("illegal_argument_exception", exceptionType);
    String exceptionMessage = eventConverter.renderExceptionMessage(iaeFailure);
    assertEquals("garbage", exceptionMessage);
    String eventMessage = eventConverter.renderEventMessage(iaeFailure);
    assertEquals("Could not construct bulk entry from record", eventMessage);
}
}
/**
 * Replaces this datum's metadata with a defensive copy of the supplied map,
 * so the caller's map can be mutated afterwards without affecting this instance.
 */
public void setMetaData(org.apache.hadoop.io.MapWritable mapWritable) {
    this.metaData = new org.apache.hadoop.io.MapWritable(mapWritable);
}
/** Copy the contents of another instance into this instance. */ public void set(CrawlDatum that) { this.status = that.status; this.fetchTime = that.fetchTime; this.retries = that.retries; this.fetchInterval = that.fetchInterval; this.score = that.score; this.modifiedTime = that.modifiedTime; this.signature = that.signature; if (that.metaData != null) { this.metaData = new org.apache.hadoop.io.MapWritable(that.metaData); // make a deep copy } else { this.metaData = null; } }
// Verifies the failure-event fields rendered by SerializationEventConverter for a
// bare MapWritable document (no Hive wrapper): raw event text, a parseable
// timestamp, the normalized exception type/message, and the fixed error message.
@Test
public void generateEventWritable() throws Exception {
    MapWritable document = new MapWritable();
    document.put(new Text("field"), new Text("value"));
    SerializationEventConverter eventConverter = new SerializationEventConverter();
    SerializationFailure iaeFailure = new SerializationFailure(new IllegalArgumentException("garbage"), document, new ArrayList<String>());
    // MapWritable does not override toString(), so only the identity prefix is stable.
    String rawEvent = eventConverter.getRawEvent(iaeFailure);
    assertThat(rawEvent, Matchers.startsWith("org.apache.hadoop.io.MapWritable@"));
    // Timestamp must be non-empty and parse to a post-epoch instant.
    String timestamp = eventConverter.getTimestamp(iaeFailure);
    assertTrue(StringUtils.hasText(timestamp));
    assertTrue(DateUtils.parseDate(timestamp).getTime().getTime() > 1L);
    // Exception class name is rendered in lower snake case.
    String exceptionType = eventConverter.renderExceptionType(iaeFailure);
    assertEquals("illegal_argument_exception", exceptionType);
    String exceptionMessage = eventConverter.renderExceptionMessage(iaeFailure);
    assertEquals("garbage", exceptionMessage);
    String eventMessage = eventConverter.renderEventMessage(iaeFailure);
    assertEquals("Could not construct bulk entry from record", eventMessage);
}
}
// Deserializes this key from the wire/sequence-file format. Field order must
// match write(DataOutput) exactly; do not reorder the reads.
@Override
public void readFields(DataInput in) throws IOException {
    this.leaderId = UTF8.readString(in);
    this.partition = in.readInt();
    this.beginOffset = in.readLong();
    this.offset = in.readLong();
    this.checksum = in.readLong();
    this.topic = in.readUTF();
    this.time = in.readLong();
    this.server = in.readUTF(); // left for legacy
    this.service = in.readUTF(); // left for legacy
    this.partitionMap = new MapWritable();
    try {
        this.partitionMap.readFields(in);
    } catch (IOException e) {
        // Deliberate fallback: older records lack the trailing partition map, so the
        // read fails here. Re-seed the map from the legacy server/service fields
        // instead of propagating the error. NOTE(review): the IOException is
        // intentionally swallowed — any genuinely truncated record is treated the
        // same as a legacy one.
        this.setServer(this.server);
        this.setService(this.service);
    }
}
/**
 * Sets configurations for multiple tables at a time.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @param configs
 *          an array of InputTableConfig objects to associate with the job
 * @throws IllegalStateException if the table configurations cannot be serialized
 * @since 1.6.0
 */
public static void setInputTableConfigs(Class<?> implementingClass, Configuration conf,
    Map<String,org.apache.accumulo.core.client.mapreduce.InputTableConfig> configs) {
  // Pack each per-table config under its table name, then serialize the whole
  // map and store it Base64-encoded under a single configuration key.
  MapWritable mapWritable = new MapWritable();
  for (Map.Entry<String,org.apache.accumulo.core.client.mapreduce.InputTableConfig> tableConfig : configs
      .entrySet()) {
    mapWritable.put(new Text(tableConfig.getKey()), tableConfig.getValue());
  }
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  try {
    mapWritable.write(new DataOutputStream(baos));
  } catch (IOException e) {
    // Bug fix: preserve the original IOException as the cause instead of
    // discarding it, so the underlying serialization failure is diagnosable.
    throw new IllegalStateException("Table configuration could not be serialized.", e);
  }
  String confKey = enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS);
  conf.set(confKey, Base64.getEncoder().encodeToString(baos.toByteArray()));
}
configs.put(defaultConfig.getKey(), defaultConfig.getValue()); String configString = conf.get(enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS)); MapWritable mapWritable = new MapWritable(); if (configString != null) { try {
boolean hasMetadata = false; if (version < 7) { MapWritable oldMetaData = new MapWritable(); if (in.readBoolean()) { hasMetadata = true; metaData = new org.apache.hadoop.io.MapWritable(); oldMetaData.readFields(in); if (in.readBoolean()) { hasMetadata = true; metaData = new org.apache.hadoop.io.MapWritable(); metaData.readFields(in);
/**
 * Returns the metadata map. When {@code create} is true and no metadata exists
 * yet, an empty {@link MapWritable} is installed first; otherwise the current
 * (possibly null) value is returned via {@code getMetadata()}.
 */
public MapWritable getMetadata(boolean create) {
    if (create && metadata == null) {
        metadata = new MapWritable();
    }
    return getMetadata();
}