@Test
public void testMapFieldExtractorNested() throws Exception {
    ConstantFieldExtractor cfe = new MapWritableFieldExtractor();
    Map<Writable, Writable> m = new MapWritable();
    MapWritable nested = new MapWritable();
    nested.put(new Text("bar"), new Text("found"));
    m.put(new Text("foo"), nested);
    assertEquals(new Text("found"), extract(cfe, "foo.bar", m));
}
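The nested case above resolves a dotted field name against nested maps. A minimal sketch of that lookup, assuming a standalone helper (DottedPathLookup and extractPath are illustrative names, not part of the MapWritableFieldExtractor API):

import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

final class DottedPathLookup {
    // Walks a path such as "foo.bar" through nested MapWritable values.
    static Writable extractPath(MapWritable map, String dottedPath) {
        Writable current = map;
        for (String segment : dottedPath.split("\\.")) {
            if (!(current instanceof MapWritable)) {
                return null; // path descends deeper than the data allows
            }
            current = ((MapWritable) current).get(new Text(segment));
            if (current == null) {
                return null; // segment not present
            }
        }
        return current;
    }
}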
public MapWritable readMap(MapWritable mw) throws IOException {
    if (mw == null) {
        mw = new MapWritable();
    }
    int length = in.readMapHeader();
    for (int i = 0; i < length; i++) {
        Writable key = read();
        Writable value = read();
        mw.put(key, value);
    }
    return mw;
}
/**
 * Sets configurations for multiple tables at a time.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @param configs
 *          a map of table names to InputTableConfig objects to associate with the job
 * @since 1.6.0
 */
public static void setInputTableConfigs(Class<?> implementingClass, Configuration conf,
        Map<String,org.apache.accumulo.core.client.mapreduce.InputTableConfig> configs) {
    MapWritable mapWritable = new MapWritable();
    for (Map.Entry<String,org.apache.accumulo.core.client.mapreduce.InputTableConfig> tableConfig : configs.entrySet())
        mapWritable.put(new Text(tableConfig.getKey()), tableConfig.getValue());

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try {
        mapWritable.write(new DataOutputStream(baos));
    } catch (IOException e) {
        throw new IllegalStateException("Table configuration could not be serialized.");
    }

    String confKey = enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS);
    conf.set(confKey, Base64.getEncoder().encodeToString(baos.toByteArray()));
}
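The trick above, writing a MapWritable into a byte stream and storing it as a Base64 string in the Configuration, can be exercised on its own. A minimal, self-contained round-trip sketch (class and variable names are illustrative):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Base64;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;

public class MapWritableBase64RoundTrip {
    public static void main(String[] args) throws IOException {
        MapWritable original = new MapWritable();
        original.put(new Text("table1"), new Text("config1"));

        // Serialize to bytes, then encode as a Base64 string (the form stored via conf.set above).
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        original.write(new DataOutputStream(baos));
        String encoded = Base64.getEncoder().encodeToString(baos.toByteArray());

        // Decode and deserialize back into a fresh MapWritable (the read path).
        MapWritable restored = new MapWritable();
        byte[] bytes = Base64.getDecoder().decode(encoded);
        restored.readFields(new DataInputStream(new ByteArrayInputStream(bytes)));

        System.out.println(restored.get(new Text("table1"))); // prints "config1"
    }
}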
/**
 * Assert MapWritable does not grow across calls to readFields.
 * @throws Exception
 * @see <a href="https://issues.apache.org/jira/browse/HADOOP-2244">HADOOP-2244</a>
 */
public void testMultipleCallsToReadFieldsAreSafe() throws Exception {
    // Create an instance and add a key/value.
    MapWritable m = new MapWritable();
    final Text t = new Text(getName());
    m.put(t, t);
    // Get current size of map. Key values are 't'.
    int count = m.size();
    // Now serialize... save off the bytes.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    m.write(dos);
    dos.close();
    // Now add new values to the MapWritable.
    m.put(new Text("key1"), new Text("value1"));
    m.put(new Text("key2"), new Text("value2"));
    // Now deserialize the original MapWritable. Ensure count and key values
    // match original state.
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    DataInputStream dis = new DataInputStream(bais);
    m.readFields(dis);
    assertEquals(count, m.size());
    assertTrue(m.get(t).equals(t));
    dis.close();
}
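The HADOOP-2244 test above pins down the contract that readFields must not accumulate entries across calls on a reused instance. A minimal sketch of that contract for a custom map-backed Writable (this class is illustrative, not Hadoop's MapWritable implementation):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class ClearingMapWritable implements Writable {
    private final Map<Text, Text> entries = new HashMap<>();

    public void put(Text key, Text value) { entries.put(key, value); }
    public int size() { return entries.size(); }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(entries.size());
        for (Map.Entry<Text, Text> e : entries.entrySet()) {
            e.getKey().write(out);
            e.getValue().write(out);
        }
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        entries.clear(); // reset state left over from any previous record
        int count = in.readInt();
        for (int i = 0; i < count; i++) {
            Text key = new Text();
            Text value = new Text();
            key.readFields(in);
            value.readFields(in);
            entries.put(key, value);
        }
    }
}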
public void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
    String[] fields = value.toString().split("\t");
    MapWritable record = new MapWritable();
    for (int i = 0; i < fields.length; i++) {
        if (i < fieldNames.length) {
            record.put(new Text(fieldNames[i]), new Text(fields[i]));
        }
    }
    context.write(NullWritable.get(), record);
}
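Since the mapper above emits (NullWritable, MapWritable) pairs, the surrounding job has to declare those output types. A brief sketch of the wiring, assuming a map-only job (TsvRecordMapper is a hypothetical name for the mapper above, and the paths come from the command line):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TsvToMapWritableDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "tsv-to-mapwritable");
        job.setJarByClass(TsvToMapWritableDriver.class);
        job.setMapperClass(TsvRecordMapper.class); // hypothetical mapper class name
        job.setNumReduceTasks(0);                  // map-only: records go straight to the output
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(MapWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}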
@Override
public long getMessageSize() {
    Text key = new Text("message.size");
    if (this.partitionMap.containsKey(key))
        return ((LongWritable) this.partitionMap.get(key)).get();
    else
        return 1024; // default estimated size
}
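The value read above has to be stored under the same key when the record is built; a matching write side would presumably look like the following (the setter shown is an assumption, not necessarily the class's actual method):

// Hypothetical write side for the getter above: store the estimated
// message size as a LongWritable under the "message.size" key.
public void setMessageSize(long size) {
    this.partitionMap.put(new Text("message.size"), new LongWritable(size));
}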
/**
 * Test that number of "unknown" classes is propagated across multiple copies.
 */
@SuppressWarnings("deprecation")
public void testForeignClass() {
    MapWritable inMap = new MapWritable();
    inMap.put(new Text("key"), new UTF8("value"));
    inMap.put(new Text("key2"), new UTF8("value2"));
    MapWritable outMap = new MapWritable(inMap);
    MapWritable copyOfCopy = new MapWritable(outMap);
    assertEquals(1, copyOfCopy.getNewClasses());
}
@Test
public void testMapWritableFieldExtractorTopLevel() throws Exception {
    ConstantFieldExtractor cfe = new MapWritableFieldExtractor();
    Map<Writable, Writable> m = new MapWritable();
    m.put(new Text("key"), new Text("value"));
    assertEquals(new Text("value"), extract(cfe, "key", m));
}
private void receiveSyncedItemFactorizedValues(
        BSPPeer<Text, VectorWritable, Text, VectorWritable, MapWritable> peer) throws IOException {
    MapWritable msg = new MapWritable();
    Text itemId = null;
    // messages are arriving; take them as long as any remain
    while ((msg = peer.getCurrentMessage()) != null) {
        itemId = (Text) msg.get(OnlineCF.Settings.MSG_ITEM_MATRIX);
        itemsMatrix.put(itemId.toString(), (VectorWritable) msg.get(OnlineCF.Settings.MSG_VALUE));
    }
}
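The loop above drains messages after a superstep barrier; the matching sender side would put the item id and its factorized vector into a MapWritable and broadcast it before syncing. A rough sketch under those assumptions (itemId and itemVector are placeholders, not names from the original code):

// Sketch of the sending side assumed by the receive loop above.
MapWritable msg = new MapWritable();
msg.put(OnlineCF.Settings.MSG_ITEM_MATRIX, new Text(itemId));
msg.put(OnlineCF.Settings.MSG_VALUE, new VectorWritable(itemVector));
for (String peerName : peer.getAllPeerNames()) {
    peer.send(peerName, msg);
}
peer.sync(); // messages become visible to receivers only after the barrier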
configs.put(defaultConfig.getKey(), defaultConfig.getValue());
String configString = conf.get(enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS));
MapWritable mapWritable = new MapWritable();
if (configString != null) {
    try {
        byte[] bytes = Base64.getDecoder().decode(configString);
        ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
        mapWritable.readFields(new DataInputStream(bais));
        bais.close();
    } catch (IOException e) {
        // a stored configuration that cannot be deserialized indicates corrupted job state
        throw new IllegalStateException("Table configuration could not be deserialized.");
    }
}
for (Map.Entry<Writable,Writable> entry : mapWritable.entrySet())
    configs.put(entry.getKey().toString(),
        (org.apache.accumulo.core.client.mapreduce.InputTableConfig) entry.getValue());
boolean hasMetadata = false;
if (version < 7) {
    // Versions before 7 stored metadata with the legacy MapWritable implementation;
    // copy its entries into the current org.apache.hadoop.io.MapWritable.
    MapWritable oldMetaData = new MapWritable();
    if (in.readBoolean()) {
        hasMetadata = true;
        metaData = new org.apache.hadoop.io.MapWritable();
        oldMetaData.readFields(in);
        for (Writable key : oldMetaData.keySet()) {
            metaData.put(key, oldMetaData.get(key));
        }
    }
} else if (in.readBoolean()) {
    // Version 7 and later write the metadata map directly.
    hasMetadata = true;
    metaData = new org.apache.hadoop.io.MapWritable();
    metaData.readFields(in);
}
@Override
public void readFields(DataInput in) throws IOException {
    this.leaderId = UTF8.readString(in);
    this.partition = in.readInt();
    this.beginOffset = in.readLong();
    this.offset = in.readLong();
    this.checksum = in.readLong();
    this.topic = in.readUTF();
    this.time = in.readLong();
    this.server = in.readUTF(); // left for legacy
    this.service = in.readUTF(); // left for legacy
    this.partitionMap = new MapWritable();
    try {
        this.partitionMap.readFields(in);
    } catch (IOException e) {
        // older records have no partition map; fall back to the legacy server/service fields
        this.setServer(this.server);
        this.setService(this.service);
    }
}