Skip to content
This repository was archived by the owner on Dec 30, 2020. It is now read-only.

Commit e1772af

Browse files
committed
Merge branch 'release/3.1.0'
2 parents bc9c178 + 79139fd commit e1772af

16 files changed

+555
-78
lines changed

README.rst

Lines changed: 46 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
Cassandra JDBC wrapper for the Datastax Java Driver
22
===================================================
33

4-
This is the jdbc wrapper of the DataStax Java Driver for Apache Cassandra (C*),
5-
which offers a simple JDBC compliant API to work with CQL3.
4+
This is the jdbc wrapper of the DataStax Java Driver for Apache Cassandra (C*),
5+
which offers a simple JDBC compliant API to work with CQL3.
66

77

88
Features
@@ -23,7 +23,7 @@ The JDBC wrapper offers access to most of the core module features:
2323
automatically and transparently tries other nodes and schedules
2424
reconnection to the dead nodes in the background.
2525
- Convenient schema access: the driver exposes a C* schema in a usable way.
26-
26+
2727

2828
Prerequisite
2929
------------
@@ -48,11 +48,11 @@ Installing
4848

4949
The last release of the driver is available on Maven Central. You can install
5050
it in your application using the following Maven dependency::
51-
51+
5252
<dependency>
5353
<groupId>com.github.adejanovski</groupId>
5454
<artifactId>cassandra-jdbc-wrapper</artifactId>
55-
<version>3.0.3</version>
55+
<version>3.1.0</version>
5656
</dependency>
5757

5858
Or get the `fat jar with all dependencies included <https://drive.google.com/folderview?id=0B7fwX0DqcWSTNzZianJrWDI2bHc&usp=sharing#list>`_.
@@ -66,8 +66,8 @@ Connect to a Cassandra cluster using the following arguments::
6666
JDBC URL : jdbc:cassandra://host1--host2--host3:9042/keyspace
6767

6868

69-
70-
You can give the driver any number of host you want seperated by "--".
69+
70+
You can give the driver any number of hosts you want separated by "--".
7171
They will be used as contact points for the driver to discover the entire cluster.
7272
Give enough hosts taking into account that some nodes may be unavailable upon establishing the JDBC connection.
7373

@@ -77,7 +77,7 @@ Java sample::
7777

7878
Class.forName("com.github.adejanovski.cassandra.jdbc.CassandraDriver");
7979
String URL = "jdbc:cassandra://host1--host2--host3:9042/keyspace1";
80-
connection = DriverManager.getConnection(URL);
80+
connection = DriverManager.getConnection(URL);
8181

8282

8383
Specifying load balancing policies
@@ -136,7 +136,7 @@ Consistency level defaults to ONE if not specified.
136136
Using simple statements
137137
-----------------------
138138

139-
To issue a simple select and get data from it::
139+
To issue a simple select and get data from it::
140140

141141
statement = connection.createStatement();
142142
ResultSet result = statement.executeQuery("SELECT bValue,iValue FROM test_table WHERE keyname='key0';");
@@ -149,28 +149,28 @@ To issue a simple select and get data from it::
149149
Using Prepared statements
150150
-------------------------
151151

152-
Considering the following table::
152+
Considering the following table::
153153

154-
CREATE TABLE table1
155-
(bigint_col bigint PRIMARY KEY, ascii_col ascii , blob_col blob, boolean_col boolean,
156-
decimal_col decimal, double_col double, float_col float, inet_col inet, int_col int,
157-
text_col text, timestamp_col timestamp, uuid_col uuid,
154+
CREATE TABLE table1
155+
(bigint_col bigint PRIMARY KEY, ascii_col ascii , blob_col blob, boolean_col boolean,
156+
decimal_col decimal, double_col double, float_col float, inet_col inet, int_col int,
157+
text_col text, timestamp_col timestamp, uuid_col uuid,
158158
timeuuid_col timeuuid, varchar_col varchar, varint_col varint,string_set_col set<text>,
159159
string_list_col list<text>, string_map_col map<text,text>
160160
);
161161

162162

163-
Prepared statements to insert a record in "table1"::
163+
Prepared statements to insert a record in "table1"::
164164

165165
String insert = "INSERT INTO table1(bigint_col , ascii_col , blob_col , boolean_col , decimal_col , double_col , "
166166
+ "float_col , inet_col , int_col , text_col , timestamp_col , uuid_col , timeuuid_col , varchar_col , varint_col, string_set_col, string_list_col, string_map_col) "
167167
+ " values(?, ?, ?, ?, ?, ? , ?, ? , ? , ?, ? , ? , now(), ? , ?, ?, ?, ? );";
168-
168+
169169
PreparedStatement pstatement = connection.prepareStatement(insert);
170-
171-
170+
171+
172172
pstatement.setObject(1, 1L); // bigint
173-
pstatement.setObject(2, "test"); // ascii
173+
pstatement.setObject(2, "test"); // ascii
174174
pstatement.setObject(3, new ByteArrayInputStream("test".getBytes("UTF-8"))); // blob
175175
pstatement.setObject(4, true); // boolean
176176
pstatement.setObject(5, new BigDecimal(5.1)); // decimal
@@ -184,7 +184,7 @@ Prepared statements to insert a record in "table1"::
184184
UUID uuid = UUID.randomUUID();
185185
pstatement.setObject(12, uuid ); // uuid
186186
pstatement.setObject(13, "test"); // varchar
187-
pstatement.setObject(14, 1);
187+
pstatement.setObject(14, 1);
188188
HashSet<String> mySet = new HashSet<String>();
189189
mySet.add("test");
190190
mySet.add("test");
@@ -197,7 +197,7 @@ Prepared statements to insert a record in "table1"::
197197
myMap.put("1","test");
198198
myMap.put("2","test");
199199
pstatement.setObject(17, myMap);
200-
200+
201201
pstatement.execute();
202202

203203

@@ -215,20 +215,20 @@ With simple statements::
215215
for(int i=0;i<10;i++){
216216
statement.addBatch("INSERT INTO testcollection (k,L) VALUES( " + i + ",[1, 3, 12345])");
217217
}
218-
218+
219219
int[] counts = statement.executeBatch();
220220
statement.close();
221221

222222
With prepared statements::
223223

224224
PreparedStatement statement = con.prepareStatement("INSERT INTO testcollection (k,L) VALUES(?,?)");
225-
225+
226226
for(int i=0;i<10;i++){
227227
statement.setInt(1, i);
228228
statement.setString(2, "[1, 3, 12345]");
229229
statement.addBatch();
230230
}
231-
231+
232232
int[] counts = statement.executeBatch();
233233
statement.close();
234234

@@ -237,12 +237,12 @@ With prepared statements::
237237
The second one is to put all the queries in a single CQL statement, each ended with a semicolon (;)::
238238

239239
Statement statement = con.createStatement();
240-
241-
StringBuilder queryBuilder = new StringBuilder();
240+
241+
StringBuilder queryBuilder = new StringBuilder();
242242
for(int i=0;i<10;i++){
243243
queryBuilder.append("INSERT INTO testcollection (k,L) VALUES( " + i + ",[1, 3, 12345]);");
244244
}
245-
245+
246246
statement.execute(queryBuilder.toString());
247247
statement.close();
248248

@@ -255,15 +255,15 @@ As JDBC batches do not support returning result sets, there is only one way to s
255255
for(int i=0;i<10;i++){
256256
queries.append("SELECT * FROM testcollection where k = "+ i + ";");
257257
}
258-
258+
259259
//send all select queries at onces
260260
ResultSet result = statement.executeQuery(queries.toString());
261261

262262
int nbRow = 0;
263-
ArrayList<Integer> ids = new ArrayList<Integer>();
263+
ArrayList<Integer> ids = new ArrayList<Integer>();
264264

265265
// get all results from all the select queries in a single result set
266-
while(result.next()){
266+
while(result.next()){
267267
ids.add(result.getInt("k"));
268268
}
269269

@@ -274,32 +274,32 @@ Working with Tuples and UDTs
274274
----------------------------
275275

276276
To create a new Tuple object in Java, use the TupleType.of().newValue() method.
277-
UDT fields cannot be instantiated outside of the Datastax Java driver core. If you want to use prepared statements, you must proceed as in the following example::
277+
UDT fields cannot be instantiated outside of the Datastax Java driver core. If you want to use prepared statements, you must proceed as in the following example::
278278

279279
String createUDT = "CREATE TYPE IF NOT EXISTS fieldmap (key text, value text )";
280-
280+
281281
String createCF = "CREATE COLUMNFAMILY t_udt (id bigint PRIMARY KEY, field_values frozen<fieldmap>, the_tuple frozen<tuple<int, text, float>>, the_other_tuple frozen<tuple<int, text, float>>);";
282282
stmt.execute(createUDT);
283283
stmt.execute(createCF);
284284
stmt.close();
285-
286-
285+
286+
287287
String insert = "INSERT INTO t_udt(id, field_values, the_tuple, the_other_tuple) values(?,{key : ?, value : ?}, (?,?,?),?);";
288-
289-
288+
289+
290290
TupleValue t = TupleType.of(DataType.cint(), DataType.text(), DataType.cfloat()).newValue();
291-
t.setInt(0, 1).setString(1, "midVal").setFloat(2, (float)2.0);
292-
293-
PreparedStatement pstatement = con.prepareStatement(insert);
294-
295-
pstatement.setLong(1, 1L);
296-
pstatement.setString(2, "key1");
291+
t.setInt(0, 1).setString(1, "midVal").setFloat(2, (float)2.0);
292+
293+
PreparedStatement pstatement = con.prepareStatement(insert);
294+
295+
pstatement.setLong(1, 1L);
296+
pstatement.setString(2, "key1");
297297
pstatement.setString(3, "value1");
298298
pstatement.setInt(4, 1);
299299
pstatement.setString(5, "midVal");
300300
pstatement.setFloat(6, (float) 2.0);
301301
pstatement.setObject(7, (Object)t);
302-
302+
303303
pstatement.execute();
304304
pstatement.close();
305305

@@ -311,15 +311,12 @@ When working on collections of UDT types, it is not possible to use prepared sta
311311
stmt.execute(createUDT);
312312
stmt.execute(createCF);
313313
stmt.close();
314-
314+
315315
System.out.println("con.getMetaData().getDatabaseProductName() = " + con.getMetaData().getDatabaseProductName());
316316
System.out.println("con.getMetaData().getDatabaseProductVersion() = " + con.getMetaData().getDatabaseProductVersion());
317317
System.out.println("con.getMetaData().getDriverName() = " + con.getMetaData().getDriverName());
318-
Statement statement = con.createStatement();
319-
318+
Statement statement = con.createStatement();
319+
320320
String insert = "INSERT INTO t_udt_tuple_coll(id,field_values,the_tuple, field_values_map, tuple_map) values(1,{{key : 'key1', value : 'value1'},{key : 'key2', value : 'value2'}}, [(1, 'midVal1', 1.0),(2, 'midVal2', 2.0)], {'map_key1':{key : 'key1', value : 'value1'},'map_key2':{key : 'key2', value : 'value2'}}, {'tuple1':(1, 2),'tuple2':(2,3)} );";
321321
statement.execute(insert);
322322
statement.close();
323-
324-
325-

pom.xml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
<groupId>com.github.adejanovski</groupId>
77
<artifactId>cassandra-jdbc-wrapper</artifactId>
8-
<version>3.0.3</version>
8+
<version>3.1.0</version>
99

1010
<packaging>jar</packaging>
1111
<name>Cassandra JDBC Wrapper</name>

src/main/java/com/github/adejanovski/cassandra/jdbc/AbstractResultSet.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ public InputStream getAsciiStream(String arg0) throws SQLException
5656
}
5757

5858

59-
public Blob getBlob(int arg0) throws SQLException
59+
/*public Blob getBlob(int arg0) throws SQLException
6060
{
6161
throw new SQLFeatureNotSupportedException(NOT_SUPPORTED);
6262
}
@@ -65,7 +65,7 @@ public Blob getBlob(String arg0) throws SQLException
6565
{
6666
throw new SQLFeatureNotSupportedException(NOT_SUPPORTED);
6767
}
68-
68+
*/
6969
public Reader getCharacterStream(int arg0) throws SQLException
7070
{
7171
throw new SQLFeatureNotSupportedException(NOT_SUPPORTED);

src/main/java/com/github/adejanovski/cassandra/jdbc/AbstractStatement.java

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -127,11 +127,7 @@ public void setCharacterStream(int parameterIndex, Reader reader) throws SQLExce
127127
throw new SQLFeatureNotSupportedException(NOT_SUPPORTED);
128128
}
129129

130-
public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException
131-
{
132-
throw new SQLFeatureNotSupportedException(NOT_SUPPORTED);
133-
}
134-
130+
135131
public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException
136132
{
137133
throw new SQLFeatureNotSupportedException(NOT_SUPPORTED);

src/main/java/com/github/adejanovski/cassandra/jdbc/CassandraMetadataResultSet.java

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1258,5 +1258,19 @@ public InputStream getBinaryStream(String columnLabel) throws SQLException {
12581258
return new ByteArrayInputStream(bytes);
12591259
}
12601260

1261+
@Override
1262+
public Blob getBlob(int index) throws SQLException {
1263+
checkIndex(index);
1264+
1265+
return new javax.sql.rowset.serial.SerialBlob(currentRow.getBytes(index-1).array());
1266+
}
1267+
1268+
@Override
1269+
public Blob getBlob(String columnName) throws SQLException {
1270+
checkName(columnName);
1271+
1272+
return new javax.sql.rowset.serial.SerialBlob(currentRow.getBytes(columnName).array());
1273+
}
1274+
12611275

12621276
}

0 commit comments

Comments
 (0)