protected static String getPrimitiveAvroTypeFromCassandraType(DataType dataType) { // Map types from Cassandra to Avro where possible if (dataType.equals(DataType.ascii()) || dataType.equals(DataType.text()) || dataType.equals(DataType.varchar()) // Nonstandard types represented by this processor as a string || dataType.equals(DataType.timestamp()) || dataType.equals(DataType.timeuuid()) || dataType.equals(DataType.uuid()) || dataType.equals(DataType.inet()) || dataType.equals(DataType.varint())) { return "string"; } else if (dataType.equals(DataType.cboolean())) { return "boolean"; } else if (dataType.equals(DataType.cint())) { return "int"; } else if (dataType.equals(DataType.bigint()) || dataType.equals(DataType.counter())) { return "long"; } else if (dataType.equals(DataType.cfloat())) { return "float"; } else if (dataType.equals(DataType.cdouble())) { return "double"; } else if (dataType.equals(DataType.blob())) { return "bytes"; } else { throw new IllegalArgumentException("createSchema: Unknown Cassandra data type " + dataType.getName() + " cannot be converted to Avro type"); } }
/**
 * Builds the CQL DDL for the workflows table: partitioned by (workflow id, shard id),
 * clustered by (entity, task id), with per-partition static counters for totals.
 *
 * @return the CREATE TABLE ... IF NOT EXISTS query string
 */
private String getCreateWorkflowsTableStatement() {
    Create table = SchemaBuilder.createTable(config.getCassandraKeyspace(), TABLE_WORKFLOWS).ifNotExists();
    table.addPartitionKey(WORKFLOW_ID_KEY, DataType.uuid());
    table.addPartitionKey(SHARD_ID_KEY, DataType.cint());
    table.addClusteringColumn(ENTITY_KEY, DataType.text());
    table.addClusteringColumn(TASK_ID_KEY, DataType.text());
    table.addColumn(PAYLOAD_KEY, DataType.text());
    // Static columns: one value per (workflow id, shard id) partition, shared by all rows.
    table.addStaticColumn(TOTAL_TASKS_KEY, DataType.cint());
    table.addStaticColumn(TOTAL_PARTITIONS_KEY, DataType.cint());
    return table.getQueryString();
}
return row.getBool(i); } else if (dataType.equals(DataType.cint())) { return row.getInt(i);
statement.setBool(paramIndex, (boolean) typeCodec.parse(paramValue)); } else if (mainType.equals(DataType.cint())) { statement.setInt(paramIndex, (int) typeCodec.parse(paramValue));
/** Private constructor: binds this codec to the CQL {@code int} type via the superclass. */
private IntCodec() { super(DataType.cint()); }
/** Verifies that a Cassandra marshal class name for a tuple parses into the matching TupleType. */
@Test(groups = "unit")
public void parseTupleTest() {
  String marshalClassName = "org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.FloatType)";
  TupleType parsed = (TupleType) DataTypeClassNameParser.parseOne(marshalClassName, protocolVersion, codecRegistry);
  assertNotNull(parsed);
  // Component order must follow the declaration order inside TupleType(...).
  assertEquals(parsed.getComponentTypes().get(0), DataType.cint());
  assertEquals(parsed.getComponentTypes().get(1), DataType.text());
  assertEquals(parsed.getComponentTypes().get(2), DataType.cfloat());
}
/** Serializing a collection that exceeds the protocol v2 size limit must be rejected. */
@Test(
    groups = "unit",
    expectedExceptions = {IllegalArgumentException.class})
public void collectionTooLargeTest() throws Exception {
  DataType listOfInt = DataType.list(DataType.cint());
  TypeCodec<List<?>> codec = codecRegistry.codecFor(listOfInt);
  // 65536 elements: one past what a 2-byte collection size can encode in protocol v2.
  List<Integer> oversized = Collections.nCopies(65536, 1);
  codec.serialize(oversized, ProtocolVersion.V2);
}
/** A list literal of tuple values must be inlined into the generated CQL text. */
@SuppressWarnings("deprecation")
@Test(groups = "short")
public void should_handle_collections_of_tuples() {
  TupleType pairOfInts = cluster().getMetadata().newTupleType(cint(), cint());
  List<TupleValue> tuples = ImmutableList.of(pairOfInts.newValue(1, 2));
  BuiltStatement statement = update("foo").with(set("l", tuples)).where(eq("k", 1));
  assertThat(statement.toString()).isEqualTo("UPDATE foo SET l=[(1,2)] WHERE k=1;");
}
}
/**
 * Create index label table for vertex/edge
 * @param session DB session
 * @param table index table name
 */
private static void createIndexTable(CassandraSessionPool.Session session, String table) {
    // Partitioned by label id; element ids are stored as a CQL set of text.
    session.execute(
            SchemaBuilder.createTable(table)
                    .ifNotExists()
                    .addPartitionKey(LABEL, DataType.cint())
                    .addColumn(ELEMENT_IDS, DataType.set(DataType.text())));
}
/**
 * Creates this index table if it does not yet exist.
 *
 * @param session DB session used to execute the DDL
 */
public void createIndex(CassandraSessionPool.Session session) {
    // Partitioned by name; element ids are stored as a CQL set of ints.
    session.execute(
            SchemaBuilder.createTable(this.table)
                    .ifNotExists()
                    .addPartitionKey(NAME, DataType.text())
                    .addColumn(ELEMENT_IDS, DataType.set(DataType.cint())));
}
/** Registering a codec colliding with an already-registered one is ignored, with a log warning. */
@Test(groups = "unit")
public void should_ignore_codec_colliding_with_already_registered_codec() {
  MemoryAppender appender = startCapturingLogs();
  CodecRegistry registry = new CodecRegistry();
  TypeCodec<?> colliding = mockCodec(cint(), of(Integer.class));
  registry.register(colliding);
  // The registry must warn and keep serving its pre-existing codec, not the new one.
  assertThat(appender.getNext()).contains("Ignoring codec MockCodec");
  assertThat(registry.codecFor(cint(), Integer.class)).isNotSameAs(colliding);
  stopCapturingLogs(appender);
}
/** parse() and format() must be exact inverses for a tuple literal. */
@SuppressWarnings("deprecation")
@Test(groups = "unit")
public void parseFormatTupleTest() {
  TupleType tupleType =
      new TupleType(
          newArrayList(DataType.cint(), DataType.text(), DataType.cfloat()),
          protocolVersion,
          codecRegistry);
  TupleValue expected = tupleType.newValue(1, "foo", 1.0f);
  // Round-trip in both directions through the registry's codec for this tuple type.
  assertEquals(codecRegistry.codecFor(tupleType).parse("(1,'foo',1.0)"), expected);
  assertEquals(codecRegistry.codecFor(tupleType).format(expected), "(1,'foo',1.0)");
}
@Test(groups = "unit") public void should_ignore_codec_colliding_with_already_generated_codec() { MemoryAppender logs = startCapturingLogs(); CodecRegistry registry = new CodecRegistry(); // Force generation of a list token from the default token registry.codecFor(list(cint()), listOf(Integer.class)); TypeCodec<?> newCodec = mockCodec(list(cint()), listOf(Integer.class)); registry.register(newCodec); assertThat(logs.getNext()).contains("Ignoring codec MockCodec"); assertThat(registry.codecFor(list(cint()), listOf(Integer.class))).isNotSameAs(newCodec); stopCapturingLogs(logs); }
/** A tuple value passed to the query builder must be rendered inline as (1). */
@Test(groups = "short")
public void should_handle_tuple() throws Exception {
  TupleType singleIntTuple = cluster().getMetadata().newTupleType(cint());
  BuiltStatement insert = insertInto("foo").value("k", 0).value("x", singleIntTuple.newValue(1));
  assertEquals(insert.toString(), "INSERT INTO foo (k,x) VALUES (0,(1));");
}
/** The type parser must turn a tuple declaration into the corresponding TupleType. */
@Test(groups = "short")
public void should_parse_tuple_types() {
  TupleType expected = cluster().getMetadata().newTupleType(cint(), list(text()));
  assertThat(parse("tuple<int,list<text>>", cluster(), null, null, null, false, false))
      .isEqualTo(expected);
}
/** Helper method for creating nested tuples */
private TupleValue nestedTuplesCreatorHelper(int depth) {
  // Wrap the (depth - 1) tuple inside one more single-component tuple until we
  // bottom out at depth 1, which is a single-int tuple holding 303.
  if (depth != 1) {
    TupleValue inner = nestedTuplesCreatorHelper(depth - 1);
    return cluster().getMetadata().newTupleType(inner.getType()).newValue(inner);
  }
  return cluster().getMetadata().newTupleType(DataType.cint()).newValue(303);
}
/** Leading/trailing/inner spaces in a type declaration must not affect parsing. */
@Test(groups = "short")
public void should_ignore_whitespace() {
  assertThat(parse(" int ", cluster(), null, null, null, false, false)).isEqualTo(cint());
  DataType expectedSet = set(bigint());
  assertThat(parse(" set < bigint > ", cluster(), null, null, null, false, false))
      .isEqualTo(expectedSet);
  DataType expectedMap = map(date(), timeuuid());
  assertThat(parse(" map < date , timeuuid > ", cluster(), null, null, null, false, false))
      .isEqualTo(expectedMap);
}
/** frozen&lt;...&gt; wrappers must parse to the frozen variants of the collection types. */
@Test(groups = "short")
public void should_parse_frozen_collection_types() {
  DataType frozenList = list(cint(), true);
  assertThat(parse("frozen<list<int>>", cluster(), null, null, null, false, false))
      .isEqualTo(frozenList);
  DataType frozenSet = set(bigint(), true);
  assertThat(parse("frozen<set<bigint>>", cluster(), null, null, null, false, false))
      .isEqualTo(frozenSet);
  DataType frozenMap = map(date(), timeuuid(), true);
  assertThat(parse("frozen<map<date,timeuuid>>", cluster(), null, null, null, false, false))
      .isEqualTo(frozenMap);
}
/** Plain collection declarations must parse to the (non-frozen) collection types. */
@Test(groups = "short")
public void should_parse_collection_types() {
  DataType expectedList = list(cint());
  assertThat(parse("list<int>", cluster(), null, null, null, false, false))
      .isEqualTo(expectedList);
  DataType expectedSet = set(bigint());
  assertThat(parse("set<bigint>", cluster(), null, null, null, false, false))
      .isEqualTo(expectedSet);
  DataType expectedMap = map(date(), timeuuid());
  assertThat(parse("map<date,timeuuid>", cluster(), null, null, null, false, false))
      .isEqualTo(expectedMap);
}
/** Type names are case-insensitive: mixed-case declarations must parse identically. */
@Test(groups = "short")
public void should_ignore_case() {
  assertThat(parse("INT", cluster(), null, null, null, false, false)).isEqualTo(cint());
  DataType expectedSet = set(bigint());
  assertThat(parse("SET<BIGint>", cluster(), null, null, null, false, false))
      .isEqualTo(expectedSet);
  TupleType tupleOfTimeuuid = cluster().getMetadata().newTupleType(timeuuid());
  assertThat(
          parse("FROZEN<mAp<Date,Tuple<timeUUID>>>", cluster(), null, null, null, false, false))
      .isEqualTo(map(date(), tupleOfTimeuuid, true));
}