diff --git a/.gitignore b/.gitignore
index aa66227..549bc0a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,10 @@ cassandra.home.unit-tests/
out/
lib_managed/
project/boot/
+/bin
+/.scala_dependencies
+/.project
+/.classpath
+/.settings
+/.cache
+/.history
diff --git a/README.textile b/README.textile
index 31b6da4..941d548 100644
--- a/README.textile
+++ b/README.textile
@@ -1,48 +1,35 @@
-h3. Project Status
+h1. Simple Scala-based Cassandra client
-I am no longer actively developing Cascal due to some work changes and the non-existence of free-time at this point in my life. This is not to say I won't return to this project at some point in the future, however I can make no current commitments to quality on this. If you would like to fork and continue the project please feel free to do so. If you happen to do so, let me know and I'll link to your fork from this page. Cheers.
+Cascal fork with support for TTL, secondary indexes, composite columns & counter columns
-h3. Cascal - Cassandra Simplified
-
-|_. Primary Author|Chris Shorrock|
-|_. Home Page|"http://wiki.github.com/shorrockin/cascal/":http://wiki.github.com/shorrockin/cascal/|
-|_. API Doc|"http://shorrockin.com/cascal/scaladocs/":http://shorrockin.com/cascal/scaladocs/|
-|_. Stable Version|1.2 (Scala 2.7.7)|
-|_. Snapshot Version|1.3-SNAPSHOT (Scala 2.8.0)|
-|_. Cassandra Version|0.6.1|
-
-
-h3. Description:
+h2. Description:
Cascal is a simple "Cassandra":http://cassandra.apache.org/ library built on the "Scala":www.scala-lang.org language that provides a very consistent and simple means by which you can interact with the Cassandra DB, providing simple means to:
* Insert / List / Get / Delete
-* Batch Insert / Delete
+* Batch Insert / Delete / Add
* Map Seq[Columns] Into Domain Objects
+* Set Column TTL
+* Secondary Indexes
+* Counter Columns
+* Experimental Static Composite Columns
* Utilize a Session Pool
One of the goals of Cascal is to make the Thrift library operate in a way more conducive to the Scala language. As such, when using Cascal you only need to use the Cascal libraries.
-Please see the "wiki pages":http://wiki.github.com/shorrockin/cascal/ for detailed usage information.
-
-h3. Maven Information
-
-
+Please see the "wiki pages":http://wiki.github.com/shorrockin/cascal/ for detailed usage information. Until the wiki is updated, take a look at:
+* Column TTL: the last test in "TestStandardInesrtAndList":https://github.com/Shimi/cascal/blob/master/src/test/scala/com/shorrockin/cascal/TestStandardInesrtAndList.scala
+* Secondary index: "TestSecondaryIndex":https://github.com/Shimi/cascal/blob/master/src/test/scala/com/shorrockin/cascal/TestSecondaryIndex.scala
+* Counter columns: "CounterCoulmnTest":https://github.com/Shimi/cascal/blob/master/src/test/scala/com/shorrockin/cascal/CounterCoulmnTest.scala
+* Static Composite Columns: "CompositeTest":https://github.com/Shimi/cascal/blob/master/src/test/scala/com/shorrockin/cascal/CompositeTest.scala
+
+h2. How to build
+
+Download and install "sbt":https://github.com/harrah/xsbt, then run:
+
$ sbt package
+
+h2. Project Status
+
+* master: Cassandra 1.1.x
+* branch 1.0.0: Cassandra 1.0.x
+* branch 0.8: Cassandra 0.8.x
\ No newline at end of file
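
The features listed in the README above can be illustrated with a short, hypothetical quickstart. This is only a sketch: the host, keyspace, column family, and row names are placeholders, and it assumes the implicit String-to-ByteBuffer conversions from com.shorrockin.cascal.utils.Conversions are in scope.

    import com.shorrockin.cascal.model.Keyspace
    import com.shorrockin.cascal.session._
    import com.shorrockin.cascal.utils.Conversions._        // implicit String -> ByteBuffer conversions (assumed)

    val session = new Session("localhost", 9160, 3000)      // host, port, timeout in ms
    val key     = Keyspace("Test") \ "Standard" \ "row-1"   // keyspace \ column family \ row key
    session.insert(key \ ("name", "cascal"))                // single column insert
    val column  = session.get(key \ "name")                 // Option[Column[StandardKey]]
    val columns = session.list(key)                         // Seq[Column[StandardKey]]
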
diff --git a/build.sbt b/build.sbt
new file mode 100644
index 0000000..7162f92
--- /dev/null
+++ b/build.sbt
@@ -0,0 +1,28 @@
+name := "cascal"
+
+version := "1.3-SNAPSHOT"
+
+organization := "com.shorrockin"
+
+scalaVersion := "2.10.1"
+
+compileOrder := CompileOrder.JavaThenScala
+
+libraryDependencies ++= Seq(
+ "org.apache.cassandra" % "cassandra-all" % "1.1.10",
+ "org.apache.cassandra" % "cassandra-thrift" % "1.1.10",
+ "com.eaio.uuid" % "uuid" % "3.2",
+ "org.slf4j" % "slf4j-api" % "1.6.4",
+ "commons-pool" % "commons-pool" % "1.6",
+ "junit" % "junit" % "4.10" % "test"
+ )
+
+libraryDependencies += "com.novocode" % "junit-interface" % "0.7" % "test->default"
+
+publishTo <<= (version) { version: String =>
+ Some(Resolver.file("file", new File("/home/shimi/workspace/shimi-mvn-repo/") / {
+ if (version.trim.endsWith("SNAPSHOT")) "snapshots"
+ else "releases/" } ))
+}
+
+scalacOptions ++= Seq("-feature", "-Xlog-implicits")
diff --git a/pom.xml b/pom.xml
deleted file mode 100644
index 3ddfba7..0000000
--- a/pom.xml
+++ /dev/null
@@ -1,270 +0,0 @@
-
- 4.0.0
-
- com.shorrockin
- cascal
- jar
- 1.3-SNAPSHOT
- Cascal - Simple Cassandra Scala Client Library
- 2010
- http://github.com/shorrockin/cascal
-
-
-
- Chris Shorrock
- cshorrock
-
-
-
-
- 1.5
- 2.8.0
-
-
-
- src/main/scala
- src/test/scala
-
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
-
- ${java.compile.version}
- ${java.compile.version}
- UTF-8
-
-
-
-
- org.scala-tools
- maven-scala-plugin
- 2.14
-
-
- scala-compile-first
- process-resources
-
- add-source
- compile
-
-
-
- scala-test-compile
- process-test-resources
-
- testCompile
-
-
-
-
-
- ${scala.version}
-
- -target:jvm-${java.compile.version}
-
-
-
-
-
- org.scala-lang
- scala-library
- ${scala.version}
-
-
-
- org.scala-lang
- scala-compiler
- ${scala.version}
-
-
-
-
-
- org.apache.maven.plugins
- maven-source-plugin
-
-
- attach-sources
-
- jar
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-javadoc-plugin
-
-
- attach-javadocs
-
- jar
-
-
-
-
-
-
-
-
-
-
- shorrockin.repository
- Shorrockin Release Repo
- scpexe://maven@shorrockin.com/www/maven
- false
-
-
- shorrockin.repository
- Shorrockin Snapshot Repo
- scpexe://maven@shorrockin.com/www/maven
- false
-
-
- shorrockin.repository
- Shorrockin Site Deploy
- scpexe://maven@shorrockin.com/www/cascal
-
-
-
-
-
- maven
- Maven Main Repo
- http://repo1.maven.org/maven2/
-
-
-
- scala-tools.org-snapshots
- Scala-Tools Maven2 Repository
- http://scala-tools.org/repo-snapshots/
-
-
-
- scala-tools.org-releases
- Scala-Tools Maven2 Repository
- http://scala-tools.org/repo-releases/
-
-
-
- shorrockin.com
- Shorrockin Repository
- http://maven.shorrockin.com/
-
-
-
-
-
-
- org.scala-lang
- scala-library
- ${scala.version}
-
-
-
- org.apache.cassandra.deps
- libthrift
- 0.5.0
-
-
-
- com.eaio.uuid
- uuid
- 3.1
-
-
-
- org.apache.cassandra
- cassandra-all
- 0.7.0
-
-
-
- org.slf4j
- slf4j-api
- 1.5.8
-
-
-
- org.slf4j
- slf4j-log4j12
- 1.5.8
- test
-
-
-
- log4j
- log4j
- 1.2.14
- true
-
-
-
- commons-pool
- commons-pool
- 1.5.4
-
-
-
-
-
- junit
- junit
- 4.6
- test
-
-
-
- commons-lang
- commons-lang
- 2.4
- test
-
-
-
- commons-codec
- commons-codec
- 1.2
- test
-
-
-
- commons-collections
- commons-collections
- 3.2.1
- test
-
-
-
- com.google.clhm
- clhm-production
- 1.0
- test
-
-
-
- com.google.collections
- google-collections
- 1.0
- test
-
-
-
- flexjson
- flexjson
- 1.7
- test
-
-
-
- high-scale-lib
- high-scale-lib
- 1.0
- test
-
-
-
diff --git a/project/build.properties b/project/build.properties
deleted file mode 100644
index 9097b4b..0000000
--- a/project/build.properties
+++ /dev/null
@@ -1,8 +0,0 @@
-#Project properties
-#Sat Apr 10 07:52:16 PDT 2010
-project.organization=com.shorrockin
-project.name=cascal
-sbt.version=0.7.3
-project.version=N/A
-build.scala.versions=2.8.0
-project.initialize=false
diff --git a/project/build/CascalProject.scala b/project/build/CascalProject.scala
deleted file mode 100644
index 2c03c38..0000000
--- a/project/build/CascalProject.scala
+++ /dev/null
@@ -1,5 +0,0 @@
-import sbt._
-
-class CascalProject(info:ProjectInfo) extends DefaultProject(info) {
- val shorrockin = "Shorrockin Repository" at "http://maven.shorrockin.com"
-}
\ No newline at end of file
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/annotations/Columns.java b/src/main/java/com/shorrockin/cascal/serialization/annotations/Columns.java
similarity index 100%
rename from src/main/scala/com/shorrockin/cascal/serialization/annotations/Columns.java
rename to src/main/java/com/shorrockin/cascal/serialization/annotations/Columns.java
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/annotations/Family.java b/src/main/java/com/shorrockin/cascal/serialization/annotations/Family.java
similarity index 100%
rename from src/main/scala/com/shorrockin/cascal/serialization/annotations/Family.java
rename to src/main/java/com/shorrockin/cascal/serialization/annotations/Family.java
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/annotations/Key.java b/src/main/java/com/shorrockin/cascal/serialization/annotations/Key.java
similarity index 100%
rename from src/main/scala/com/shorrockin/cascal/serialization/annotations/Key.java
rename to src/main/java/com/shorrockin/cascal/serialization/annotations/Key.java
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/annotations/Keyspace.java b/src/main/java/com/shorrockin/cascal/serialization/annotations/Keyspace.java
similarity index 100%
rename from src/main/scala/com/shorrockin/cascal/serialization/annotations/Keyspace.java
rename to src/main/java/com/shorrockin/cascal/serialization/annotations/Keyspace.java
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/annotations/Optional.java b/src/main/java/com/shorrockin/cascal/serialization/annotations/Optional.java
similarity index 100%
rename from src/main/scala/com/shorrockin/cascal/serialization/annotations/Optional.java
rename to src/main/java/com/shorrockin/cascal/serialization/annotations/Optional.java
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/annotations/Super.java b/src/main/java/com/shorrockin/cascal/serialization/annotations/Super.java
similarity index 100%
rename from src/main/scala/com/shorrockin/cascal/serialization/annotations/Super.java
rename to src/main/java/com/shorrockin/cascal/serialization/annotations/Super.java
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/annotations/SuperColumn.java b/src/main/java/com/shorrockin/cascal/serialization/annotations/SuperColumn.java
similarity index 100%
rename from src/main/scala/com/shorrockin/cascal/serialization/annotations/SuperColumn.java
rename to src/main/java/com/shorrockin/cascal/serialization/annotations/SuperColumn.java
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/annotations/Value.java b/src/main/java/com/shorrockin/cascal/serialization/annotations/Value.java
similarity index 100%
rename from src/main/scala/com/shorrockin/cascal/serialization/annotations/Value.java
rename to src/main/java/com/shorrockin/cascal/serialization/annotations/Value.java
diff --git a/src/main/scala/com/shorrockin/cascal/model/Column.scala b/src/main/scala/com/shorrockin/cascal/model/Column.scala
index 40e3a83..8a77c45 100644
--- a/src/main/scala/com/shorrockin/cascal/model/Column.scala
+++ b/src/main/scala/com/shorrockin/cascal/model/Column.scala
@@ -19,11 +19,13 @@ import com.shorrockin.cascal.utils.Utils.now
case class Column[Owner](val name:ByteBuffer,
val value:ByteBuffer,
val time:Long,
- val owner:Owner) extends Gettable[Column[Owner]] {
+ val ttl:Option[Int],
+ val owner:Owner) extends Gettable[Column[Owner], ByteBuffer] {
- def this(name:ByteBuffer, value:ByteBuffer, owner:Owner) = this(name, value, now, owner)
- def this(name:ByteBuffer, owner:Owner) = this(name, null, now, owner)
- def this(name:ByteBuffer, value:ByteBuffer, date:Date, owner:Owner) = this(name, value, date.getTime, owner)
+ def this(name:ByteBuffer, value:ByteBuffer, owner:Owner) = this(name, value, now, None, owner)
+ def this(name:ByteBuffer, owner:Owner) = this(name, null, now, None, owner)
+ def this(name:ByteBuffer, value:ByteBuffer, date:Date, owner:Owner) = this(name, value, date.getTime, None, owner)
+ def this(name:ByteBuffer, value:ByteBuffer, time:Long, owner:Owner) = this(name, value, time, None, owner)
val partial = (value == null)
@@ -42,9 +44,9 @@ case class Column[Owner](val name:ByteBuffer,
lazy val columnOrSuperColumn = {
val cosc = new ColumnOrSuperColumn
owner match {
- case key:StandardKey => cosc.setColumn(new CassColumn(name, value, time))
+ case key:StandardKey => cosc.setColumn(cassandraColumn())
case sup:SuperColumn =>
- val list = Conversions.toJavaList(new CassColumn(name, value, time) :: Nil)
+ val list = Conversions.toJavaList(cassandraColumn() :: Nil)
cosc.setSuper_column(new CassSuperColumn(sup.value, list))
}
}
@@ -54,8 +56,9 @@ case class Column[Owner](val name:ByteBuffer,
* copy method to create a new instance of this column with a new value and
* the same other values.
*/
- def \(newValue:ByteBuffer) = new Column[Owner](name, newValue, time, owner)
+ def \(newValue:ByteBuffer) = new Column[Owner](name, newValue, time, ttl, owner)
+ def !(newTtl:Int) = new Column[Owner](name, value, time, Option(newTtl), owner)
/**
* appends a column onto this one forming a list
@@ -69,14 +72,29 @@ case class Column[Owner](val name:ByteBuffer,
*/
def convertGetResult(colOrSuperCol:ColumnOrSuperColumn):Column[Owner] = {
val col = colOrSuperCol.getColumn
- Column(ByteBuffer.wrap(col.getName), ByteBuffer.wrap(col.getValue), col.getTimestamp, owner)
+ val ttl = if (col.isSetTtl) {
+ Some(col.getTtl)
+ } else {
+ None
+ }
+ Column(ByteBuffer.wrap(col.getName), ByteBuffer.wrap(col.getValue), col.getTimestamp, ttl, owner)
}
private def stringIfPossible(a:ByteBuffer):String = {
if (a == null) return "NULL"
- if (a.array.length <= 4) return "Array (" + a.array.mkString(", ") + ")"
+ if (a.array.length <= 4) return "Array (" + byteArrayOps(a.array).mkString(", ") + ")"
if (a.array.length > 1000) return a.array.toString
- try { Conversions.string(a) } catch { case _ => a.array.toString }
+ try { Conversions.string(a) } catch { case _:Throwable => a.array.toString }
+ }
+
+ def cassandraColumn(): CassColumn = {
+ val cassCol = new CassColumn(name)
+ cassCol.setValue(value)
+ cassCol.setTimestamp(time)
+ for (timeToLive <- ttl) {
+ cassCol.setTtl(timeToLive)
+ }
+ cassCol
}
override def toString():String = "%s \\ Column(name = %s, value = %s, time = %s)".format(
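
The `!` method added above copies a column with a TTL (in seconds) attached, and cassandraColumn() carries it through to the Thrift column. A minimal sketch, assuming the session, key, and implicit conversions from the quickstart sketch earlier; the column path is a placeholder:

    val withTtl = (key \ ("token", "abc123")) ! 3600   // copy of the column with a one-hour TTL
    session.insert(withTtl)                            // cassandraColumn() forwards the TTL to Thrift
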
diff --git a/src/main/scala/com/shorrockin/cascal/model/ColumnFamily.scala b/src/main/scala/com/shorrockin/cascal/model/ColumnFamily.scala
index 22ee0d5..cfa968a 100644
--- a/src/main/scala/com/shorrockin/cascal/model/ColumnFamily.scala
+++ b/src/main/scala/com/shorrockin/cascal/model/ColumnFamily.scala
@@ -15,5 +15,4 @@ trait ColumnFamily[+KeyType] extends StringValue {
lazy val columnParent = new ColumnParent().setColumn_family(value)
def \(value:String):KeyType
-
}
\ No newline at end of file
diff --git a/src/main/scala/com/shorrockin/cascal/model/CounterColumn.scala b/src/main/scala/com/shorrockin/cascal/model/CounterColumn.scala
new file mode 100644
index 0000000..152e2bb
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/model/CounterColumn.scala
@@ -0,0 +1,50 @@
+package com.shorrockin.cascal.model
+
+import java.nio.ByteBuffer
+import org.apache.cassandra.thrift.{ColumnPath, ColumnOrSuperColumn}
+import org.apache.cassandra.thrift.{CounterColumn => CassandraCounterColumn}
+import org.apache.cassandra.thrift.{CounterSuperColumn => CassandraCounterSuperColumn}
+import com.shorrockin.cascal.utils.Conversions
+import scala.collection.JavaConversions._
+
+case class CounterColumn[Owner](val name:ByteBuffer,
+ val value:Option[Long],
+ val owner:Owner) extends Gettable[CounterColumn[Owner], Option[Long]] {
+
+ def this(name:ByteBuffer, owner:Owner) = this(name, None, owner)
+
+ val key = owner.asInstanceOf[ColumnContainer[_, _]].key
+ val family = key.family
+ val keyspace = key.keyspace
+
+ lazy val columnPath = {
+ val out = new ColumnPath(family.value)
+ owner match {
+ case owner:CounterSuperColumn => out.setColumn(name).setSuper_column(owner.value)
+ case key:CounterStandardKey => out.setColumn(name)
+ }
+ }
+
+ def +(newValue:Long) = new CounterColumn[Owner](name, Some(newValue), owner)
+
+ def -(newValue:Long) = new CounterColumn[Owner](name, Some(-1 * newValue), owner)
+
+ def convertGetResult(colOrSuperCol:ColumnOrSuperColumn):CounterColumn[Owner] = {
+ val col = colOrSuperCol.getCounter_column
+ CounterColumn(ByteBuffer.wrap(col.getName), Some(col.getValue), owner)
+ }
+
+ def cassandraColumn(): CassandraCounterColumn = new CassandraCounterColumn(name, value.get)
+
+ lazy val columnOrSuperColumn = {
+ val cosc = new ColumnOrSuperColumn
+ owner match {
+ case key:CounterStandardKey => cosc.setCounter_column(cassandraColumn())
+ case sup:CounterSuperColumn =>
+ cosc.setCounter_super_column(new CassandraCounterSuperColumn(sup.value, cassandraColumn() :: Nil))
+ }
+ }
+
+ override def toString():String = "%s \\ Column(name = %s, value = %s)".format(
+ owner.toString, Conversions.string(name), value)
+}
\ No newline at end of file
diff --git a/src/main/scala/com/shorrockin/cascal/model/CounterStandardColumnFamily.scala b/src/main/scala/com/shorrockin/cascal/model/CounterStandardColumnFamily.scala
new file mode 100644
index 0000000..b860dc4
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/model/CounterStandardColumnFamily.scala
@@ -0,0 +1,7 @@
+package com.shorrockin.cascal.model
+
+case class CounterStandardColumnFamily(val value:String, val keyspace:Keyspace) extends ColumnFamily[CounterStandardKey] {
+ def \(value:String) = new CounterStandardKey(value, this)
+
+ override def toString = "%s \\ StandardCounterColumnFamily(value = %s)".format(keyspace.toString, value)
+}
\ No newline at end of file
diff --git a/src/main/scala/com/shorrockin/cascal/model/CounterStandardKey.scala b/src/main/scala/com/shorrockin/cascal/model/CounterStandardKey.scala
new file mode 100644
index 0000000..a3a8f61
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/model/CounterStandardKey.scala
@@ -0,0 +1,20 @@
+package com.shorrockin.cascal.model
+import java.nio.ByteBuffer
+import org.apache.cassandra.thrift.ColumnOrSuperColumn
+
+case class CounterStandardKey(val value:String, val family:CounterStandardColumnFamily)
+ extends Key[CounterColumn[CounterStandardKey], Seq[CounterColumn[CounterStandardKey]]]
+ with StandardColumnContainer[CounterColumn[CounterStandardKey], Seq[CounterColumn[CounterStandardKey]], Long] {
+
+ def \(name:ByteBuffer) = new CounterColumn(name, None, this)
+ def \(name:ByteBuffer, value:Long) = new CounterColumn(name, Some(value), this)
+
+ def convertListResult(results:Seq[ColumnOrSuperColumn]):Seq[CounterColumn[CounterStandardKey]] = {
+ results.map { (result) =>
+ val column = result.getCounter_column
+ \(ByteBuffer.wrap(column.getName), column.getValue)
+ }
+ }
+
+ override def toString = "%s \\ StandardCounterKey(value = %s)".format(family.toString, value)
+}
diff --git a/src/main/scala/com/shorrockin/cascal/model/CounterSuperColumn.scala b/src/main/scala/com/shorrockin/cascal/model/CounterSuperColumn.scala
new file mode 100644
index 0000000..df383f4
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/model/CounterSuperColumn.scala
@@ -0,0 +1,47 @@
+package com.shorrockin.cascal.model
+import java.nio.ByteBuffer
+import org.apache.cassandra.thrift.ColumnParent
+import org.apache.cassandra.thrift.ColumnPath
+import org.apache.cassandra.thrift.ColumnOrSuperColumn
+import scala.collection.JavaConversions._
+import com.shorrockin.cascal.utils.Conversions
+
+class CounterSuperColumn(val value:ByteBuffer, val key:CounterSuperKey) extends Gettable[Seq[CounterColumn[CounterSuperColumn]], ByteBuffer]
+ with StandardColumnContainer[CounterColumn[CounterSuperColumn], Seq[CounterColumn[CounterSuperColumn]], Long] {
+
+ def \(name:ByteBuffer) = new CounterColumn(name, this)
+ def \(name:ByteBuffer, value:Long) = new CounterColumn(name, Some(value), this)
+
+ val family = key.family
+ val keyspace = family.keyspace
+
+ lazy val columnParent = new ColumnParent(family.value).setSuper_column(value)
+ lazy val columnPath = new ColumnPath(family.value).setSuper_column(value)
+
+ def ::(other:CounterSuperColumn):List[CounterSuperColumn] = other :: this :: Nil
+
+ /**
+ * given the returned object from the get request, convert
+ * to our return type.
+ */
+ def convertGetResult(colOrSuperCol:ColumnOrSuperColumn):Seq[CounterColumn[CounterSuperColumn]] = {
+ val counterSuperCol = colOrSuperCol.getCounter_super_column
+ counterSuperCol.getColumns.map { column =>
+ \ (ByteBuffer.wrap(column.getName), column.getValue)
+ }.toSeq
+ }
+
+ /**
+ * given the return object from the list request, convert it to
+ * our return type
+ */
+ def convertListResult(results:Seq[ColumnOrSuperColumn]):Seq[CounterColumn[CounterSuperColumn]] = {
+ results.map { result =>
+ val counterColumn = result.getCounter_column
+ \(ByteBuffer.wrap(counterColumn.getName), counterColumn.getValue)
+ }
+ }
+
+ override def toString():String = "%s \\ CounterSuperColumn(value = %s)".format(
+ key.toString, Conversions.string(value))
+}
diff --git a/src/main/scala/com/shorrockin/cascal/model/CounterSuperColumnFamily.scala b/src/main/scala/com/shorrockin/cascal/model/CounterSuperColumnFamily.scala
new file mode 100644
index 0000000..5cedca8
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/model/CounterSuperColumnFamily.scala
@@ -0,0 +1,6 @@
+package com.shorrockin.cascal.model
+
+class CounterSuperColumnFamily(val value:String, val keyspace:Keyspace) extends ColumnFamily[CounterSuperKey] {
+ def \(value:String) = new CounterSuperKey(value, this)
+ override def toString = "%s \\\\ CounterSuperColumnFamily(value = %s)".format(keyspace.toString, value)
+}
\ No newline at end of file
diff --git a/src/main/scala/com/shorrockin/cascal/model/CounterSuperKey.scala b/src/main/scala/com/shorrockin/cascal/model/CounterSuperKey.scala
new file mode 100644
index 0000000..a7926d2
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/model/CounterSuperKey.scala
@@ -0,0 +1,27 @@
+package com.shorrockin.cascal.model
+
+import java.nio.ByteBuffer
+import org.apache.cassandra.thrift.ColumnOrSuperColumn
+import scala.collection.JavaConversions._
+
+class CounterSuperKey(val value:String, val family:CounterSuperColumnFamily)
+ extends Key[CounterSuperColumn, Seq[(CounterSuperColumn, Seq[CounterColumn[CounterSuperColumn]])]] {
+
+ def \(value:ByteBuffer) = new CounterSuperColumn(value, this)
+
+ /**
+ * converts a list of super columns to the specified return type
+ */
+ def convertListResult(results: Seq[ColumnOrSuperColumn]): Seq[(CounterSuperColumn, Seq[CounterColumn[CounterSuperColumn]])] = {
+ results.map { result =>
+ val nativeCounterSuperCol = result.getCounter_super_column
+ val counterSuperColumn = this \ ByteBuffer.wrap(nativeCounterSuperCol.getName)
+ val columns = nativeCounterSuperCol.getColumns.map { column =>
+ counterSuperColumn \ (ByteBuffer.wrap(column.getName), column.getValue)
+ }
+ (counterSuperColumn -> columns)
+ }
+ }
+
+ override def toString = "%s \\ CounterSuperKey(value = %s)".format(family.toString, value)
+}
diff --git a/src/main/scala/com/shorrockin/cascal/model/Gettable.scala b/src/main/scala/com/shorrockin/cascal/model/Gettable.scala
index a7589b0..b5d1610 100644
--- a/src/main/scala/com/shorrockin/cascal/model/Gettable.scala
+++ b/src/main/scala/com/shorrockin/cascal/model/Gettable.scala
@@ -10,7 +10,7 @@ import org.apache.cassandra.thrift.{ColumnPath, ColumnOrSuperColumn}
* @param Result determines the type of object returned when this
* column is looked up through the session get method.
*/
-trait Gettable[Result] extends ByteValue {
+trait Gettable[Result, ValueType] extends PathComponent[ValueType] {
val key:Key[_, _]
val keyspace:Keyspace
val family:ColumnFamily[_]
diff --git a/src/main/scala/com/shorrockin/cascal/model/IndexQuery.scala b/src/main/scala/com/shorrockin/cascal/model/IndexQuery.scala
new file mode 100644
index 0000000..b4538ee
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/model/IndexQuery.scala
@@ -0,0 +1,66 @@
+package com.shorrockin.cascal.model
+
+import scala.language.implicitConversions
+import org.apache.cassandra.thrift.IndexClause
+import java.nio.ByteBuffer
+import org.apache.cassandra.thrift.{IndexExpression => CassIndexExpression}
+import org.apache.cassandra.thrift.{IndexOperator => ThriftOperator}
+import scala.collection.JavaConversions._
+
+case class IndexExpression(val columnName: ByteBuffer, val operator: ThriftOperator, val value: ByteBuffer)
+
+case class IndexQuery(val family: ColumnFamily[StandardKey], val expressions: List[IndexExpression],
+ val startKey: ByteBuffer, val limit: Int = 100) {
+
+ val indexClause = new IndexClause();
+
+ implicit def CascalIndexExpression(expression: IndexExpression): CassIndexExpression =
+ new CassIndexExpression(expression.columnName, expression.operator, expression.value)
+
+ implicit def CascalIndexExpressionList(expressions: List[IndexExpression]): List[CassIndexExpression] =
+ expressions.map(CascalIndexExpression(_))
+
+ var ex = CascalIndexExpressionList(expressions)
+ indexClause.setExpressions(ex)
+ indexClause.setStart_key(startKey)
+ indexClause.setCount(limit)
+
+ def limit(limit: Int) = IndexQuery(family, expressions, startKey, limit)
+}
+
+object IndexQuery {
+
+ class IndexExpressionHelper(val queryHelper: IndexQueryHelper,
+ val colName: ByteBuffer) {
+
+ def startAt(startAT: ByteBuffer): IndexQuery = {
+ queryHelper.startAt(startAT)
+ }
+
+ private def thriftOperator(value: ByteBuffer, operator: ThriftOperator) = {
+ val expression = IndexExpression(colName, operator, value)
+ queryHelper += expression
+ queryHelper
+ }
+
+ def Eq(value: ByteBuffer) = thriftOperator(value, ThriftOperator.EQ)
+ def Gt(value: ByteBuffer) = thriftOperator(value, ThriftOperator.GT)
+ def Gte(value: ByteBuffer) = thriftOperator(value, ThriftOperator.GTE)
+ def Lt(value: ByteBuffer) = thriftOperator(value, ThriftOperator.LT)
+ def Lte(value: ByteBuffer) = thriftOperator(value, ThriftOperator.LTE)
+ }
+
+ class IndexQueryHelper(val family: ColumnFamily[StandardKey]) {
+
+ var expressions: List[IndexExpression] = List()
+
+ def startAt(startAt: ByteBuffer) = IndexQuery(family, expressions, startAt)
+
+ def and(columnName: ByteBuffer) = new IndexExpressionHelper(this, columnName)
+
+ def +=(expression: IndexExpression) = expressions = expression :: expressions
+ }
+
+ def indexQueryHelper(columnFamily: ColumnFamily[StandardKey], columnName: ByteBuffer) = new IndexExpressionHelper(new IndexQueryHelper(columnFamily), columnName)
+
+}
\ No newline at end of file
diff --git a/src/main/scala/com/shorrockin/cascal/model/Keyspace.scala b/src/main/scala/com/shorrockin/cascal/model/Keyspace.scala
index 79edd71..0b90637 100644
--- a/src/main/scala/com/shorrockin/cascal/model/Keyspace.scala
+++ b/src/main/scala/com/shorrockin/cascal/model/Keyspace.scala
@@ -14,6 +14,9 @@ import java.nio.ByteBuffer
*/
case class Keyspace(val value:String) extends StringValue {
def \(value:String):StandardColumnFamily = new StandardColumnFamily(value, this)
+ def \#(value:String):CounterStandardColumnFamily = new CounterStandardColumnFamily(value, this)
def \\(value:String):SuperColumnFamily = new SuperColumnFamily(value, this)
+ def \\#(value:String):CounterSuperColumnFamily = new CounterSuperColumnFamily(value, this)
+
override def toString = "Keyspace(value = %s)".format(value)
}
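
The new `\#` and `\\#` operators mirror `\` and `\\` for counter column families. A hedged sketch of how they combine with the CounterColumn `+` / `-` methods and Session.add; names are placeholders and the quickstart imports are assumed:

    val hits = Keyspace("Test") \# "Counters" \ "page-42" \ "views"   // CounterColumn[CounterStandardKey]
    session.add(hits + 1)                                             // increment by 1
    session.add(hits - 5)                                             // decrement by 5
    val total = session.get(hits)                                     // Option[CounterColumn[CounterStandardKey]]
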
diff --git a/src/main/scala/com/shorrockin/cascal/model/StandardColumnContainer.scala b/src/main/scala/com/shorrockin/cascal/model/StandardColumnContainer.scala
index 5bfa5a4..9cc7467 100644
--- a/src/main/scala/com/shorrockin/cascal/model/StandardColumnContainer.scala
+++ b/src/main/scala/com/shorrockin/cascal/model/StandardColumnContainer.scala
@@ -7,8 +7,7 @@ import java.nio.ByteBuffer
*
* @author Chris Shorrock
*/
-trait StandardColumnContainer[ColumnType, SliceType] extends ColumnContainer[ColumnType, SliceType] {
+trait StandardColumnContainer[ColumnType, SliceType, ValueType] extends ColumnContainer[ColumnType, SliceType] {
def \(name:ByteBuffer):ColumnType
- def \(name:ByteBuffer, value:ByteBuffer):ColumnType
- def \(name:ByteBuffer, value:ByteBuffer, time:Long):ColumnType
+ def \(name:ByteBuffer, value:ValueType):ColumnType
}
diff --git a/src/main/scala/com/shorrockin/cascal/model/StandardColumnFamily.scala b/src/main/scala/com/shorrockin/cascal/model/StandardColumnFamily.scala
index 977363f..e30e786 100644
--- a/src/main/scala/com/shorrockin/cascal/model/StandardColumnFamily.scala
+++ b/src/main/scala/com/shorrockin/cascal/model/StandardColumnFamily.scala
@@ -1,4 +1,5 @@
package com.shorrockin.cascal.model
+import java.nio.ByteBuffer
/**
* abstraction for the standard column family. a standard column family
@@ -8,5 +9,9 @@ package com.shorrockin.cascal.model
*/
case class StandardColumnFamily(val value:String, val keyspace:Keyspace) extends ColumnFamily[StandardKey] {
def \(value:String) = new StandardKey(value, this)
+
override def toString = "%s \\ StandardColumnFamily(value = %s)".format(keyspace.toString, value)
+
+ import com.shorrockin.cascal.model.IndexQuery._
+ def where(columnName: ByteBuffer) = indexQueryHelper(this, columnName)
}
\ No newline at end of file
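
The `where` method above is the entry point to the IndexQuery DSL defined earlier. A hedged sketch of a chained secondary-index query; the family, column names, and values are placeholders, the quickstart imports are assumed, and Cassandra requires at least one equality expression on an indexed column:

    val people = Keyspace("Test") \ "People"                                    // StandardColumnFamily
    val query  = people.where("age") Eq "32" and "city" Eq "london" startAt ""  // IndexQuery
    val rows   = session.list(query)                                            // Map[StandardKey, Seq[Column[StandardKey]]]
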
diff --git a/src/main/scala/com/shorrockin/cascal/model/StandardKey.scala b/src/main/scala/com/shorrockin/cascal/model/StandardKey.scala
index 50f4e1e..086a60d 100644
--- a/src/main/scala/com/shorrockin/cascal/model/StandardKey.scala
+++ b/src/main/scala/com/shorrockin/cascal/model/StandardKey.scala
@@ -11,7 +11,7 @@ import org.apache.cassandra.thrift.{ColumnOrSuperColumn}
* @author Chris Shorrock
*/
case class StandardKey(val value:String, val family:StandardColumnFamily) extends Key[Column[StandardKey], Seq[Column[StandardKey]]]
- with StandardColumnContainer[Column[StandardKey], Seq[Column[StandardKey]]] {
+ with StandardColumnContainer[Column[StandardKey], Seq[Column[StandardKey]], ByteBuffer] {
def \(name:ByteBuffer) = new Column(name, this)
def \(name:ByteBuffer, value:ByteBuffer) = new Column(name, value, this)
@@ -25,4 +25,4 @@ case class StandardKey(val value:String, val family:StandardColumnFamily) extend
}
override def toString = "%s \\ StandardKey(value = %s)".format(family.toString, value)
-}
+}
\ No newline at end of file
diff --git a/src/main/scala/com/shorrockin/cascal/model/SuperColumn.scala b/src/main/scala/com/shorrockin/cascal/model/SuperColumn.scala
index 2ab2a49..939d360 100644
--- a/src/main/scala/com/shorrockin/cascal/model/SuperColumn.scala
+++ b/src/main/scala/com/shorrockin/cascal/model/SuperColumn.scala
@@ -11,8 +11,9 @@ import com.shorrockin.cascal.utils.Conversions
*
* @author Chris Shorrock
*/
-case class SuperColumn(val value:ByteBuffer, val key:SuperKey) extends Gettable[Seq[Column[SuperColumn]]]()
- with StandardColumnContainer[Column[SuperColumn], Seq[Column[SuperColumn]]] {
+case class SuperColumn(val value:ByteBuffer, val key:SuperKey) extends Gettable[Seq[Column[SuperColumn]], ByteBuffer]
+ with StandardColumnContainer[Column[SuperColumn], Seq[Column[SuperColumn]], ByteBuffer] {
+
def \(name:ByteBuffer) = new Column(name, this)
def \(name:ByteBuffer, value:ByteBuffer) = new Column(name, value, this)
def \(name:ByteBuffer, value:ByteBuffer, time:Long) = new Column(name, value, time, this)
@@ -26,7 +27,7 @@ case class SuperColumn(val value:ByteBuffer, val key:SuperKey) extends Gettable[
def ::(other:SuperColumn):List[SuperColumn] = other :: this :: Nil
private def convertList[T](v:java.util.List[T]):List[T] = {
- scala.collection.JavaConversions.asBuffer(v).toList
+ scala.collection.JavaConversions.asScalaBuffer(v).toList
}
/**
@@ -51,9 +52,9 @@ case class SuperColumn(val value:ByteBuffer, val key:SuperKey) extends Gettable[
}
private def stringIfPossible(a:ByteBuffer):String = {
- if (a.array.length <= 4) return "Array (" + a.array.mkString(", ") + ")"
+ if (a.array.length <= 4) return "Array (" + byteArrayOps(a.array).mkString(", ") + ")"
if (a.array.length > 1000) return a.array.toString
- try { Conversions.string(a) } catch { case _ => a.array.toString }
+ try { Conversions.string(a) } catch { case _:Throwable => a.array.toString }
}
override def toString():String = "%s \\ SuperColumn(value = %s)".format(
diff --git a/src/main/scala/com/shorrockin/cascal/model/SuperKey.scala b/src/main/scala/com/shorrockin/cascal/model/SuperKey.scala
index 711fdf6..40ffd7f 100644
--- a/src/main/scala/com/shorrockin/cascal/model/SuperKey.scala
+++ b/src/main/scala/com/shorrockin/cascal/model/SuperKey.scala
@@ -22,7 +22,7 @@ case class SuperKey(val value:String, val family:SuperColumnFamily) extends Key[
}
private def convertList[T](v:java.util.List[T]):List[T] = {
- scala.collection.JavaConversions.asBuffer(v).toList
+ scala.collection.JavaConversions.asScalaBuffer(v).toList
}
override def toString = "%s \\ SuperKey(value = %s)".format(family.toString, value)
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/Converter.scala b/src/main/scala/com/shorrockin/cascal/serialization/Converter.scala
index 4c9f2b3..fe1a893 100755
--- a/src/main/scala/com/shorrockin/cascal/serialization/Converter.scala
+++ b/src/main/scala/com/shorrockin/cascal/serialization/Converter.scala
@@ -1,5 +1,6 @@
package com.shorrockin.cascal.serialization
+import scala.language.existentials
import java.nio.ByteBuffer
import reflect.Manifest
import java.lang.annotation.Annotation
@@ -102,14 +103,14 @@ class Converter(serializers:Map[Class[_], Serializer[_]]) {
*/
private def getFieldSerialized[T](fieldType:Class[_], fieldGetter:Method, obj:T):ByteBuffer = {
// Couldn't figure out how to case match classes on a class obj with type erasure
- if (fieldType == classOf[String]) Conversions.bytes(fieldGetter.invoke(obj).asInstanceOf[String])
- else if (fieldType == classOf[UUID]) Conversions.bytes(fieldGetter.invoke(obj).asInstanceOf[UUID])
- else if (fieldType == classOf[Int]) Conversions.bytes(fieldGetter.invoke(obj).asInstanceOf[Int])
- else if (fieldType == classOf[Long]) Conversions.bytes(fieldGetter.invoke(obj).asInstanceOf[Long])
- else if (fieldType == classOf[Boolean]) Conversions.bytes(fieldGetter.invoke(obj).asInstanceOf[Boolean])
- else if (fieldType == classOf[Float]) Conversions.bytes(fieldGetter.invoke(obj).asInstanceOf[Float])
- else if (fieldType == classOf[Double]) Conversions.bytes(fieldGetter.invoke(obj).asInstanceOf[Double])
- else if (fieldType == classOf[Date]) Conversions.bytes(fieldGetter.invoke(obj).asInstanceOf[Date])
+ if (fieldType == classOf[String]) Conversions.byteBuffer(fieldGetter.invoke(obj).asInstanceOf[String])
+ else if (fieldType == classOf[UUID]) Conversions.byteBuffer(fieldGetter.invoke(obj).asInstanceOf[UUID])
+ else if (fieldType == classOf[Int]) Conversions.byteBuffer(fieldGetter.invoke(obj).asInstanceOf[Int])
+ else if (fieldType == classOf[Long]) Conversions.byteBuffer(fieldGetter.invoke(obj).asInstanceOf[Long])
+ else if (fieldType == classOf[Boolean]) Conversions.byteBuffer(fieldGetter.invoke(obj).asInstanceOf[Boolean])
+ else if (fieldType == classOf[Float]) Conversions.byteBuffer(fieldGetter.invoke(obj).asInstanceOf[Float])
+ else if (fieldType == classOf[Double]) Conversions.byteBuffer(fieldGetter.invoke(obj).asInstanceOf[Double])
+ else if (fieldType == classOf[Date]) Conversions.byteBuffer(fieldGetter.invoke(obj).asInstanceOf[Date])
else throw new IllegalStateException("Type %s of getter %s is unknown".format(fieldGetter.getName, fieldType.toString))
}
@@ -122,14 +123,14 @@ class Converter(serializers:Map[Class[_], Serializer[_]]) {
val opt = fieldGetter.invoke(obj).asInstanceOf[Option[_]]
opt match {
case None => null
- case Some(x:String) => Conversions.bytes(x)
- case Some(x:UUID) => Conversions.bytes(x)
- case Some(x:Int) => Conversions.bytes(x)
- case Some(x:Long) => Conversions.bytes(x)
- case Some(x:Boolean) => Conversions.bytes(x)
- case Some(x:Float) => Conversions.bytes(x)
- case Some(x:Double) => Conversions.bytes(x)
- case Some(x:Date) => Conversions.bytes(x)
+ case Some(x:String) => Conversions.byteBuffer(x)
+ case Some(x:UUID) => Conversions.byteBuffer(x)
+ case Some(x:Int) => Conversions.byteBuffer(x)
+ case Some(x:Long) => Conversions.byteBuffer(x)
+ case Some(x:Boolean) => Conversions.byteBuffer(x)
+ case Some(x:Float) => Conversions.byteBuffer(x)
+ case Some(x:Double) => Conversions.byteBuffer(x)
+ case Some(x:Date) => Conversions.byteBuffer(x)
case _ => throw new IllegalStateException(
"Type of Option %s for getter %s is unknown".format(opt.toString, fieldGetter.getName))
}
@@ -181,8 +182,8 @@ class Converter(serializers:Map[Class[_], Serializer[_]]) {
if (columnName == null || value == null) acc
else info.isSuper match {
- case true => (info.family.asInstanceOf[SuperColumnFamily] \ key \ superCol \ (Conversions.bytes(columnName), value)) :: acc
- case false => (info.family.asInstanceOf[StandardColumnFamily] \ key \ (Conversions.bytes(columnName), value)) :: acc
+ case true => (info.family.asInstanceOf[SuperColumnFamily] \ key \ superCol \ (Conversions.byteBuffer(columnName), value)) :: acc
+ case false => (info.family.asInstanceOf[StandardColumnFamily] \ key \ (Conversions.byteBuffer(columnName), value)) :: acc
}
}
}
@@ -208,7 +209,7 @@ class Converter(serializers:Map[Class[_], Serializer[_]]) {
* returns the column with the specified name, or
*/
private def find(name:String, columns:Seq[Column[_]]):Option[Column[_]] = {
- val nameBytes = Conversions.bytes(name)
+ val nameBytes = Conversions.byteBuffer(name)
columns.find { (c) => nameBytes.equals(c.name) }
}
@@ -224,7 +225,7 @@ class Converter(serializers:Map[Class[_], Serializer[_]]) {
// TODO sure there's a better way - without this you end up with:
// "value asInstanceOf is not a member of ?"
val castedSerial = s.asInstanceOf[Serializer[Any]]
- (castedSerial.fromBytes(bytes)).asInstanceOf[A]
+ (castedSerial.fromByteBuffer(bytes)).asInstanceOf[A]
}
}
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/Serializer.scala b/src/main/scala/com/shorrockin/cascal/serialization/Serializer.scala
index 01f22f2..6b3469b 100755
--- a/src/main/scala/com/shorrockin/cascal/serialization/Serializer.scala
+++ b/src/main/scala/com/shorrockin/cascal/serialization/Serializer.scala
@@ -1,10 +1,13 @@
package com.shorrockin.cascal.serialization
-import com.shorrockin.cascal.utils.{UUID => UUIDUtils}
+import com.shorrockin.cascal.utils.{UUID => UUIDUtils, ThreadLocal}
import java.util.UUID
import java.util.Date
import java.nio.charset.Charset
+import java.nio.charset.CharsetDecoder
+import java.nio.charset.CharsetEncoder
import java.nio.{ByteBuffer,CharBuffer}
+import scala.util.DynamicVariable
object Serializer {
@@ -31,10 +34,10 @@ object Serializer {
*/
trait Serializer[A] {
/** converts this object to a byte array for entry into cassandra */
- def toBytes(obj:A):ByteBuffer
+ def toByteBuffer(obj:A):ByteBuffer
/** converts the specified byte array into an object */
- def fromBytes(bytes:ByteBuffer):A
+ def fromByteBuffer(bytes:ByteBuffer):A
/** converts the specified value to a string */
def toString(obj:A):String
@@ -45,21 +48,25 @@ trait Serializer[A] {
object StringSerializer extends Serializer[String] {
val utf8 = Charset.forName("UTF-8")
- val decoder = utf8.newDecoder
- val encoder = utf8.newEncoder
+ private val decoder = new ThreadLocal(utf8.newDecoder)
+ private val encoder = new ThreadLocal(utf8.newEncoder)
- def toBytes(str:String) = encoder.encode(CharBuffer.wrap(str.toCharArray))
- def fromBytes(bytes:ByteBuffer) = decoder.decode(bytes).toString
+ def toByteBuffer(str:String) = encoder.withValue {
+ _.encode(CharBuffer.wrap(str.toCharArray))
+ }
+ def fromByteBuffer(bytes:ByteBuffer) = decoder.withValue {
+ _.decode(bytes).toString
+ }
def toString(str:String) = str
def fromString(str:String) = str
}
object UUIDSerializer extends Serializer[UUID] {
- def fromBytes(bytes:ByteBuffer) = UUIDUtils(bytes.array)
+ def fromByteBuffer(bytes:ByteBuffer) = UUIDUtils(bytes.array)
def toString(uuid:UUID) = uuid.toString
def fromString(str:String) = UUID.fromString(str)
- def toBytes(uuid:UUID) = {
+ def toByteBuffer(uuid:UUID) = {
val msb = uuid.getMostSignificantBits()
val lsb = uuid.getLeastSignificantBits()
val buffer = new Array[Byte](16)
@@ -75,8 +82,8 @@ object UUIDSerializer extends Serializer[UUID] {
object IntSerializer extends Serializer[Int] {
val bytesPerInt = java.lang.Integer.SIZE / java.lang.Byte.SIZE
- def toBytes(i:Int) = ByteBuffer.allocate(bytesPerInt).putInt(i)
- def fromBytes(bytes:ByteBuffer) = bytes.getInt
+ def toByteBuffer(i:Int) = ByteBuffer.allocate(bytesPerInt).putInt(0, i)
+ def fromByteBuffer(bytes:ByteBuffer) = bytes.getInt
def toString(obj:Int) = obj.toString
def fromString(str:String) = str.toInt
}
@@ -84,15 +91,15 @@ object IntSerializer extends Serializer[Int] {
object LongSerializer extends Serializer[Long] {
val bytesPerLong = java.lang.Long.SIZE / java.lang.Byte.SIZE
- def toBytes(l:Long) = ByteBuffer.allocate(bytesPerLong).putLong(l)
- def fromBytes(bytes:ByteBuffer) = bytes.getLong()
+ def toByteBuffer(l:Long) = ByteBuffer.allocate(bytesPerLong).putLong(0, l)
+ def fromByteBuffer(bytes:ByteBuffer) = bytes.getLong
def toString(obj:Long) = obj.toString
def fromString(str:String) = str.toLong
}
object BooleanSerializer extends Serializer[Boolean] {
- def toBytes(b:Boolean) = StringSerializer.toBytes(b.toString)
- def fromBytes(bytes:ByteBuffer) = StringSerializer.fromBytes(bytes).toBoolean
+ def toByteBuffer(b:Boolean) = StringSerializer.toByteBuffer(b.toString)
+ def fromByteBuffer(bytes:ByteBuffer) = StringSerializer.fromByteBuffer(bytes).toBoolean
def toString(obj:Boolean) = obj.toString
def fromString(str:String) = str.toBoolean
}
@@ -100,8 +107,8 @@ object BooleanSerializer extends Serializer[Boolean] {
object FloatSerializer extends Serializer[Float] {
val bytesPerFloat = java.lang.Float.SIZE / java.lang.Byte.SIZE
- def toBytes(f:Float) = ByteBuffer.allocate(bytesPerFloat).putFloat(f)
- def fromBytes(bytes:ByteBuffer) = bytes.getFloat()
+ def toByteBuffer(f:Float) = ByteBuffer.allocate(bytesPerFloat).putFloat(0, f)
+ def fromByteBuffer(bytes:ByteBuffer) = bytes.getFloat()
def toString(obj:Float) = obj.toString
def fromString(str:String) = str.toFloat
}
@@ -109,15 +116,15 @@ object FloatSerializer extends Serializer[Float] {
object DoubleSerializer extends Serializer[Double] {
val bytesPerDouble = java.lang.Double.SIZE / java.lang.Byte.SIZE
- def toBytes(d:Double) = ByteBuffer.allocate(bytesPerDouble).putDouble(d)
- def fromBytes(bytes:ByteBuffer) = bytes.getDouble
+ def toByteBuffer(d:Double) = ByteBuffer.allocate(bytesPerDouble).putDouble(0, d)
+ def fromByteBuffer(bytes:ByteBuffer) = bytes.getDouble
def toString(obj:Double) = obj.toString
def fromString(str:String) = str.toDouble
}
object DateSerializer extends Serializer[Date] {
- def toBytes(date:Date) = LongSerializer.toBytes(date.getTime)
- def fromBytes(bytes:ByteBuffer) = new Date(LongSerializer.fromBytes(bytes).longValue)
+ def toByteBuffer(date:Date) = LongSerializer.toByteBuffer(date.getTime)
+ def fromByteBuffer(bytes:ByteBuffer) = new Date(LongSerializer.fromByteBuffer(bytes).longValue)
def toString(obj:Date) = obj.getTime.toString
def fromString(str:String) = new Date(str.toLong.longValue)
}
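
A small round-trip sketch of the renamed serializer API. Because the numeric serializers now write at absolute index 0, the returned buffer's position stays at zero and it can be read back without flipping:

    import com.shorrockin.cascal.serialization.{IntSerializer, StringSerializer}

    val num  = IntSerializer.fromByteBuffer(IntSerializer.toByteBuffer(42))           // 42
    val text = StringSerializer.fromByteBuffer(StringSerializer.toByteBuffer("abc"))  // "abc"
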
diff --git a/src/main/scala/com/shorrockin/cascal/serialization/TupleSerializer.scala b/src/main/scala/com/shorrockin/cascal/serialization/TupleSerializer.scala
new file mode 100644
index 0000000..af72575
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/serialization/TupleSerializer.scala
@@ -0,0 +1,103 @@
+package com.shorrockin.cascal.serialization
+
+import java.nio.ByteBuffer
+import java.util.{Date, UUID}
+
+object TupleSerializer {
+
+ def extractType[T](bytes: ByteBuffer, mf: Manifest[T]): T = {
+ val length = (bytes.get() & 0xFF) << 8 | (bytes.get() & 0xFF)
+ val typeBuffer = bytes.duplicate
+ typeBuffer.limit(typeBuffer.position + length)
+
+ bytes.position(typeBuffer.position + length + 1)
+
+ val ser = Serializer.Default(mf.erasure)
+ ser.fromByteBuffer(typeBuffer).asInstanceOf[T]
+ }
+
+ def byteBuffer[T](value: T)(implicit mf: Manifest[T]): ByteBuffer = {
+ value match {
+ case x: String if mf.erasure == classOf[String] => StringSerializer.toByteBuffer(x)
+ case x: UUID if mf.erasure == classOf[UUID] => UUIDSerializer.toByteBuffer(x)
+ case x: Int if mf.erasure == classOf[Int] => IntSerializer.toByteBuffer(x)
+ case x: Long if mf.erasure == classOf[Long] => LongSerializer.toByteBuffer(x)
+ case x: Boolean if mf.erasure == classOf[Boolean] => BooleanSerializer.toByteBuffer(x)
+ case x: Float if mf.erasure == classOf[Float] => FloatSerializer.toByteBuffer(x)
+ case x: Double if mf.erasure == classOf[Double] => DoubleSerializer.toByteBuffer(x)
+ case x: Date if mf.erasure == classOf[Date] => DateSerializer.toByteBuffer(x)
+ case None => ByteBuffer.allocate(0)
+ }
+ }
+}
+
+class CompositeBuffer(val buffers: ByteBuffer*) {
+
+ val lengthBytesSize = 2
+ val endOfComponentSize = 1
+ val compositeOverheadSize = lengthBytesSize + endOfComponentSize
+
+ def buffer(): ByteBuffer = {
+ val buffersSize = buffers.foldLeft(0){(sum, buffer) => sum + buffer.remaining}
+ val requiredSize = buffersSize + buffers.size * compositeOverheadSize
+ val buffer = ByteBuffer.allocate(requiredSize)
+
+ buffers foreach {buff =>
+ buffer.putShort(buff.remaining.asInstanceOf[Short]).put(buff).put(0.toByte)
+ }
+ buffer.rewind
+ buffer
+ }
+}
+
+object Tuple2Serializer {
+ import TupleSerializer._
+
+ def toByteBuffer[T1: Manifest, T2: Manifest](tuple: Tuple2[T1, T2]): ByteBuffer = {
+ val buffer = new CompositeBuffer(byteBuffer(tuple._1), byteBuffer(tuple._2))
+ buffer.buffer
+ }
+
+ def fromByteBuffer[T1, T2](bytes:ByteBuffer, mf1: Manifest[T1], mf2: Manifest[T2]): Tuple2[T1, T2] = {
+ (extractType(bytes, mf1), extractType(bytes, mf2))
+ }
+}
+
+object Tuple3Serializer {
+ import TupleSerializer._
+
+ def toByteBuffer[T1: Manifest, T2: Manifest, T3: Manifest](tuple: Tuple3[T1, T2, T3]): ByteBuffer = {
+ val buffer = new CompositeBuffer(byteBuffer(tuple._1), byteBuffer(tuple._2), byteBuffer(tuple._3))
+ buffer.buffer
+ }
+
+ def fromByteBuffer[T1, T2, T3](bytes:ByteBuffer, mf1: Manifest[T1], mf2: Manifest[T2], mf3: Manifest[T3]): Tuple3[T1, T2, T3] = {
+ (extractType(bytes, mf1), extractType(bytes, mf2), extractType(bytes, mf3))
+ }
+}
+
+object Tuple4Serializer {
+ import TupleSerializer._
+
+ def toByteBuffer[T1: Manifest, T2: Manifest, T3: Manifest, T4: Manifest](tuple: Tuple4[T1, T2, T3, T4]): ByteBuffer = {
+ val buffer = new CompositeBuffer(byteBuffer(tuple._1), byteBuffer(tuple._2), byteBuffer(tuple._3), byteBuffer(tuple._4))
+ buffer.buffer
+ }
+
+ def fromByteBuffer[T1, T2, T3, T4](bytes:ByteBuffer, mf1: Manifest[T1], mf2: Manifest[T2], mf3: Manifest[T3], mf4: Manifest[T4]): Tuple4[T1, T2, T3, T4] = {
+ (extractType(bytes, mf1), extractType(bytes, mf2), extractType(bytes, mf3), extractType(bytes, mf4))
+ }
+}
+
+object Tuple5Serializer {
+ import TupleSerializer._
+
+ def toByteBuffer[T1: Manifest, T2: Manifest, T3: Manifest, T4: Manifest, T5: Manifest](tuple: Tuple5[T1, T2, T3, T4, T5]): ByteBuffer = {
+ val buffer = new CompositeBuffer(byteBuffer(tuple._1), byteBuffer(tuple._2), byteBuffer(tuple._3), byteBuffer(tuple._4), byteBuffer(tuple._5))
+ buffer.buffer
+ }
+
+ def fromByteBuffer[T1, T2, T3, T4, T5](bytes:ByteBuffer, mf1: Manifest[T1], mf2: Manifest[T2], mf3: Manifest[T3], mf4: Manifest[T4], mf5: Manifest[T5]): Tuple5[T1, T2, T3, T4, T5] = {
+ (extractType(bytes, mf1), extractType(bytes, mf2), extractType(bytes, mf3), extractType(bytes, mf4), extractType(bytes, mf5))
+ }
+}
\ No newline at end of file
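
A hedged sketch of using the tuple serializers above to build a static composite column name via the CompositeBuffer encoding; the family, row, and component values are placeholders, and the quickstart session and implicit conversions are assumed:

    import com.shorrockin.cascal.serialization.Tuple2Serializer

    val name = Tuple2Serializer.toByteBuffer(("user-7", 42))                // (String, Int) components
    val col  = Keyspace("Test") \ "Timeline" \ "row-1" \ (name, "payload")  // composite-named column
    session.insert(col)
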
diff --git a/src/main/scala/com/shorrockin/cascal/session/Operation.scala b/src/main/scala/com/shorrockin/cascal/session/Operation.scala
index 871bc50..5d58ff8 100644
--- a/src/main/scala/com/shorrockin/cascal/session/Operation.scala
+++ b/src/main/scala/com/shorrockin/cascal/session/Operation.scala
@@ -1,5 +1,6 @@
package com.shorrockin.cascal.session
+import scala.language.existentials
import org.apache.cassandra.thrift.{Deletion, Mutation}
import com.shorrockin.cascal.model._
import com.shorrockin.cascal.utils.Utils.now
@@ -31,6 +32,13 @@ case class Insert(val column:Column[_]) extends Operation {
val keyspace = column.keyspace
}
+case class Add(val column:CounterColumn[_]) extends Operation {
+ lazy val mutation = new Mutation().setColumn_or_supercolumn(column.columnOrSuperColumn)
+ val family = column.family
+ val key = column.key
+ val keyspace = column.keyspace
+}
+
/**
* companion class for simplified deletion creation.
@@ -38,6 +46,7 @@ case class Insert(val column:Column[_]) extends Operation {
case object Delete {
def apply(container:ColumnContainer[_, _], predicate:Predicate) = new Delete(container, predicate)
def apply(container:ColumnContainer[_, _]) = new Delete(container, EmptyPredicate)
+ def apply(column:Column[_]) = new Delete(column.key, ColumnPredicate(column.name :: Nil))
}
/**
diff --git a/src/main/scala/com/shorrockin/cascal/session/Session.scala b/src/main/scala/com/shorrockin/cascal/session/Session.scala
index 6cf1838..7133c0d 100644
--- a/src/main/scala/com/shorrockin/cascal/session/Session.scala
+++ b/src/main/scala/com/shorrockin/cascal/session/Session.scala
@@ -1,20 +1,21 @@
package com.shorrockin.cascal.session
+import scala.language.implicitConversions
import scala.collection.mutable
import collection.immutable.HashSet
import java.util.concurrent.atomic.AtomicLong
import java.util.{Map => JMap, List => JList, HashMap, ArrayList}
import java.nio.ByteBuffer
-
import org.apache.thrift.protocol.TBinaryProtocol
import org.apache.thrift.transport.{TFramedTransport, TSocket}
import org.apache.cassandra.thrift.{Mutation, Cassandra, NotFoundException, ConsistencyLevel}
import org.apache.cassandra.thrift.{Column => CassColumn}
import org.apache.cassandra.thrift.{SuperColumn => CassSuperColumn}
-
import com.shorrockin.cascal.model._
import com.shorrockin.cascal.utils.Conversions._
import com.shorrockin.cascal.utils.Utils.now
+import scala.collection.mutable.LinkedHashMap
+import scala.collection.Map
/**
* a cascal session is the entry point for interacting with the
@@ -22,22 +23,25 @@ import com.shorrockin.cascal.utils.Utils.now
*
* @author Chris Shorrock
*/
-class Session(val host:Host, val defaultConsistency:Consistency, val framedTransport:Boolean) extends SessionTemplate {
+class Session(val host:Host, val defaultConsistency:Consistency, val noFramedTransport:Boolean) extends SessionTemplate {
- def this(host:String, port:Int, timeout:Int, defaultConsistency:Consistency, framedTransport:Boolean) = this(Host(host, port, timeout), defaultConsistency, framedTransport)
+ def this(host:String, port:Int, timeout:Int, defaultConsistency:Consistency, noFramedTransport:Boolean) = this(Host(host, port, timeout), defaultConsistency, noFramedTransport)
def this(host:String, port:Int, timeout:Int, defaultConsistency:Consistency) = this(host, port, timeout, defaultConsistency, false)
def this(host:String, port:Int, timeout:Int) = this(host, port, timeout, Consistency.One, false)
private val sock = {
- if (framedTransport) new TFramedTransport(new TSocket(host.address, host.port, host.timeout))
- else new TSocket(host.address, host.port, host.timeout)
+ if (noFramedTransport) {
+ new TSocket(host.address, host.port, host.timeout)
+ } else {
+ new TFramedTransport(new TSocket(host.address, host.port, host.timeout))
+ }
}
private val protocol = new TBinaryProtocol(sock)
var lastError: Option[Throwable] = None
- val client = new Cassandra.Client(protocol, protocol)
+ val client = new Cassandra.Client(protocol)
/**
* opens the socket
@@ -136,12 +140,17 @@ class Session(val host:Host, val defaultConsistency:Consistency, val framedTrans
}
}
-
+ def truncate(cfname: String) = {
+ client.truncate(cfname)
+ }
+
/**
* returns the column value for the specified column
*/
- def get[ResultType](col: Gettable[ResultType], consistency: Consistency): Option[ResultType] = detect {
+ def get[ResultType, ValueType](col: Gettable[ResultType, ValueType], consistency: Consistency): Option[ResultType] = detect {
+ verifyKeyspace(col.keyspace.value)
try {
val result = client.get(ByteBuffer.wrap(col.key.value.getBytes("UTF-8")), col.columnPath, consistency)
Some(col.convertGetResult(result))
} catch {
@@ -153,7 +162,7 @@ class Session(val host:Host, val defaultConsistency:Consistency, val framedTrans
/**
* returns the column value for the specified column, using the default consistency
*/
- def get[ResultType](col: Gettable[ResultType]): Option[ResultType] = get(col, defaultConsistency)
+ def get[ResultType, ValueType](col: Gettable[ResultType, ValueType]): Option[ResultType] = get(col, defaultConsistency)
/**
@@ -162,17 +171,23 @@ class Session(val host:Host, val defaultConsistency:Consistency, val framedTrans
def insert[E](col: Column[E], consistency: Consistency) = detect {
verifyInsert(col)
verifyKeyspace(col.keyspace.value)
- val cassCol = new CassColumn(col.name, col.value, col.time)
- client.insert(col.key.value, col.key.columnParent, cassCol, consistency)
+ client.insert(col.key.value, col.owner.asInstanceOf[ColumnContainer[_, _]].columnParent, col.cassandraColumn, consistency)
col
}
-
+
/**
* inserts the specified column value using the default consistency
*/
def insert[E](col: Column[E]): Column[E] = insert(col, defaultConsistency)
+ def add[E](col: CounterColumn[E], consistency: Consistency) = detect {
+ verifyKeyspace(col.keyspace.value)
+ client.add(col.key.value, col.owner.asInstanceOf[ColumnContainer[_, _]].columnParent, col.cassandraColumn, consistency)
+ col
+ }
+
+ def add[E](col: CounterColumn[E]): CounterColumn[E] = add(col, defaultConsistency)
/**
* counts the number of columns in the specified column container
@@ -191,7 +206,30 @@ class Session(val host:Host, val defaultConsistency:Consistency, val framedTrans
*/
def count(container: ColumnContainer[_, _]): Int = count(container, defaultConsistency)
+ def count[ColumnType, ResultType](containers: Seq[ColumnContainer[ColumnType, ResultType]],
+ predicate:Predicate = EmptyPredicate,
+ consistency: Consistency = defaultConsistency): Map[ColumnContainer[ColumnType, ResultType], Int] = detect {
+
+ if (containers.size > 0) detect {
+ val firstContainer = containers(0)
+ val keyspace = firstContainer.keyspace
+ verifyKeyspace(keyspace.value)
+ val keyStrings = containers.map {container => ByteBuffer.wrap(container.key.value.getBytes("UTF-8"))}
+ val result = client.multiget_count(keyStrings, firstContainer.columnParent, predicate.slicePredicate, consistency)
+
+ val containersByKey = containers.map {container =>
+ (container.key.value, container)
+ }.toMap
+
+ result map {element =>
+ (containersByKey(element._1), element._2.toInt)
+ }
+ } else {
+ throw new IllegalArgumentException("must provide at least 1 container for a count(keys, predicate, consistency) call")
+ }
+ }
+
/**
* removes the specified column container
*/
@@ -217,6 +255,20 @@ class Session(val host:Host, val defaultConsistency:Consistency, val framedTrans
}
+ /**
+ * removes the specified column container using the default consistency
+ */
+ def remove(column: CounterColumn[_]): Unit = remove(column, defaultConsistency)
+
+ /**
+ * removes the specified column container
+ */
+ def remove(column: CounterColumn[_], consistency: Consistency): Unit = detect {
+ verifyKeyspace(column.keyspace.value)
+ client.remove_counter(column.key.value, column.columnPath, consistency)
+ }
+
+
/**
* removes the specified column container using the default consistency
*/
@@ -233,7 +285,6 @@ class Session(val host:Host, val defaultConsistency:Consistency, val framedTrans
container.convertListResult(convertList(results))
}
-
/**
* performs a list of the specified container using no predicate and the default consistency.
*/
@@ -302,13 +353,13 @@ class Session(val host:Host, val defaultConsistency:Consistency, val framedTrans
def list[ColumnType, ListType](family: ColumnFamily[Key[ColumnType, ListType]], range: KeyRange, predicate: Predicate, consistency: Consistency): Map[Key[ColumnType, ListType], ListType] = detect {
verifyKeyspace(family.keyspace.value)
val results = client.get_range_slices(family.columnParent, predicate.slicePredicate, range.cassandraRange, consistency)
- var map = Map[Key[ColumnType, ListType], ListType]()
+ var mapBuilder = LinkedHashMap.canBuildFrom[Key[ColumnType, ListType], ListType]()
convertList(results).foreach { (keyslice) =>
val key = (family \ keyslice.key)
- map = map + (key -> key.convertListResult(keyslice.columns))
+ mapBuilder += (key -> key.convertListResult(keyslice.columns))
}
- map
+ mapBuilder.result
}
@@ -327,7 +378,23 @@ class Session(val host:Host, val defaultConsistency:Consistency, val framedTrans
list(family, range, EmptyPredicate, defaultConsistency)
}
+
+ def list[ColumnType, ListType](query: IndexQuery): Map[StandardKey, Seq[Column[StandardKey]]] = list(query, EmptyPredicate, defaultConsistency)
+
+ def list[ColumnType, ListType](query: IndexQuery, predicate: Predicate, consistency: Consistency): Map[StandardKey, Seq[Column[StandardKey]]] = detect {
+ val family: ColumnFamily[StandardKey] = query.family
+ verifyKeyspace(family.keyspace.value)
+ val results = client.get_indexed_slices(query.family.columnParent, query.indexClause, predicate.slicePredicate, consistency)
+
+ var mapBuilder = LinkedHashMap.canBuildFrom[StandardKey, Seq[Column[StandardKey]]]()
+ convertList(results).foreach { (keyslice) =>
+ val key = (family \ keyslice.key)
+ mapBuilder += (key -> key.convertListResult(keyslice.columns))
+ }
+ mapBuilder.result
+ }
+
/**
* performs the specified seq of operations in batch. assumes all operations belong
* to the same keyspace. If they do not then the first keyspace in the first operation
@@ -424,18 +491,18 @@ class Session(val host:Host, val defaultConsistency:Consistency, val framedTrans
}
private def Buffer[T](v:java.util.List[T]) = {
- scala.collection.JavaConversions.asBuffer(v)
+ scala.collection.JavaConversions.asScalaBuffer(v)
}
implicit private def convertList[T](v:java.util.List[T]):List[T] = {
- scala.collection.JavaConversions.asBuffer(v).toList
+ scala.collection.JavaConversions.asScalaBuffer(v).toList
}
implicit private def convertMap[K,V](v:java.util.Map[K,V]): scala.collection.mutable.Map[K,V] = {
- scala.collection.JavaConversions.asMap(v)
+ scala.collection.JavaConversions.mapAsScalaMap(v)
}
implicit private def convertSet[T](s:java.util.Set[T]):scala.collection.mutable.Set[T] = {
- scala.collection.JavaConversions.asSet(s)
+ scala.collection.JavaConversions.asScalaSet(s)
}
}
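The new multi-key count and the secondary-index list added above are easiest to see in a short usage sketch. The snippet below is not part of the patch: it assumes the "Test" keyspace and "Standard" column family defined by the embedded test schema later in this diff, and it takes the IndexQuery as a parameter because this patch does not show how one is constructed.

import com.shorrockin.cascal.session._
import com.shorrockin.cascal.utils.Conversions._

// count columns for several keys in a single multiget_count round trip;
// EmptyPredicate and the session's default consistency apply when omitted
def countExample(session: Session) = {
  val keys = Seq("Test" \ "Standard" \ "key-1", "Test" \ "Standard" \ "key-2")
  session.count(keys) foreach { case (key, n) => println(key + " holds " + n + " columns") }
}

// list rows matching a secondary-index query; insertion order is preserved
// because the implementation above builds a LinkedHashMap
def indexExample(session: Session, query: IndexQuery) =
  session.list(query) map { case (key, columns) => (key, columns.size) }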
diff --git a/src/main/scala/com/shorrockin/cascal/session/SessionPool.scala b/src/main/scala/com/shorrockin/cascal/session/SessionPool.scala
index 0cfc578..9d86b75 100644
--- a/src/main/scala/com/shorrockin/cascal/session/SessionPool.scala
+++ b/src/main/scala/com/shorrockin/cascal/session/SessionPool.scala
@@ -5,7 +5,7 @@ import org.apache.commons.pool.impl.{GenericObjectPoolFactory, GenericObjectPool
import com.shorrockin.cascal.utils.Logging
import com.shorrockin.cascal.jmx.CascalStatistics
import com.shorrockin.cascal.model._
-
+import scala.collection.Map
/**
* a session pool which maintains a collection of open sessions so that
@@ -19,7 +19,7 @@ import com.shorrockin.cascal.model._
*
* @author Chris Shorrock
*/
-class SessionPool(val hosts:Seq[Host], val params:PoolParams, consistency:Consistency, framedTransport:Boolean) extends SessionTemplate {
+class SessionPool(val hosts:Seq[Host], val params:PoolParams, val defaultConsistency:Consistency, framedTransport:Boolean) extends SessionTemplate {
def this(hosts:Seq[Host], params:PoolParams, consistency:Consistency) = this(hosts, params, consistency, false)
def this(hosts:Seq[Host], params:PoolParams) = this(hosts, params, Consistency.One, false)
@@ -114,14 +114,14 @@ class SessionPool(val hosts:Seq[Host], val params:PoolParams, consistency:Consis
/**
* used to create sessions
*/
- private object SessionFactory extends PoolableObjectFactory with Logging {
+ private object SessionFactory extends PoolableObjectFactory[Session] with Logging {
// instead of randomly choosing a host we'll attempt to round-robin them, may not
// be completely round robin with multiple threads but it should provide a more
// even spread than something random.
var lastHostUsed = 0
def next(current:Int) = (current + 1) % hosts.size
- def makeObject:Object = makeSession(next(lastHostUsed), 0)
+ def makeObject:Session = makeSession(next(lastHostUsed), 0)
def makeSession(hostIndex:Int, count:Int):Session = {
if (count < hosts.size) {
@@ -130,7 +130,7 @@ class SessionPool(val hosts:Seq[Host], val params:PoolParams, consistency:Consis
try {
log.debug("attempting to create connection to: " + host)
- val session = new Session(host.address, host.port, host.timeout, consistency, framedTransport)
+ val session = new Session(host.address, host.port, host.timeout, defaultConsistency, framedTransport)
session.open
CascalStatistics.creation(host)
session
@@ -145,15 +145,13 @@ class SessionPool(val hosts:Seq[Host], val params:PoolParams, consistency:Consis
}
}
- def session(obj:Object) = obj.asInstanceOf[Session]
-
- def activateObject(obj:Object):Unit = {}
+ def activateObject(session:Session):Unit = {}
- def destroyObject(obj:Object):Unit = session(obj).close
+ def destroyObject(session:Session):Unit = session.close
- def validateObject(obj:Object):Boolean = session(obj).isOpen && !session(obj).hasError
+ def validateObject(session:Session):Boolean = session.isOpen && !session.hasError
- def passivateObject(obj:Object):Unit = {}
+ def passivateObject(session:Session):Unit = {}
}
@@ -163,18 +161,24 @@ class SessionPool(val hosts:Seq[Host], val params:PoolParams, consistency:Consis
def keyspaces:Seq[String] = borrow { _.keyspaces }
- def get[ResultType](col:Gettable[ResultType], consistency:Consistency):Option[ResultType] = borrow { _.get(col, consistency) }
+ def truncate(cfname: String) = borrow { _.truncate(cfname) }
+
+ def get[ResultType, ValueType](col:Gettable[ResultType, ValueType], consistency:Consistency):Option[ResultType] = borrow { _.get(col, consistency) }
- def get[ResultType](col:Gettable[ResultType]):Option[ResultType] = borrow { _.get(col) }
+ def get[ResultType, ValueType](col:Gettable[ResultType, ValueType]):Option[ResultType] = borrow { _.get(col) }
def insert[E](col:Column[E], consistency:Consistency):Column[E] = borrow { _.insert(col, consistency) }
def insert[E](col:Column[E]):Column[E] = borrow { _.insert(col) }
+ def add[E](col: CounterColumn[E], consistency: Consistency = defaultConsistency) = borrow { _.add(col, consistency) }
+
def count(container:ColumnContainer[_ ,_], consistency:Consistency):Int = borrow { _.count(container, consistency) }
def count(container:ColumnContainer[_, _]):Int = borrow { _.count(container) }
+ def count[ColumnType, ResultType](containers: Seq[ColumnContainer[ColumnType, ResultType]], predicate:Predicate = EmptyPredicate, consistency: Consistency = defaultConsistency): Map[ColumnContainer[ColumnType, ResultType], Int] = borrow { _.count(containers, predicate, consistency) }
+
def remove(container:ColumnContainer[_, _], consistency:Consistency):Unit = borrow { _.remove(container, consistency) }
def remove(container:ColumnContainer[_, _]):Unit = borrow { _.remove(container) }
@@ -183,6 +187,12 @@ class SessionPool(val hosts:Seq[Host], val params:PoolParams, consistency:Consis
def remove(column:Column[_]):Unit = borrow { _.remove(column) }
+ /**
+ * removes the specified counter column
+ */
+ def remove(column: CounterColumn[_], consistency: Consistency = defaultConsistency): Unit = borrow { _.remove(column, consistency) }
+
+
def list[ResultType](container:ColumnContainer[_, ResultType], predicate:Predicate, consistency:Consistency):ResultType = borrow { _.list(container, predicate, consistency) }
def list[ResultType](container:ColumnContainer[_, ResultType]):ResultType = borrow { _.list(container) }
@@ -201,6 +211,10 @@ class SessionPool(val hosts:Seq[Host], val params:PoolParams, consistency:Consis
def list[ColumnType, ListType](family:ColumnFamily[Key[ColumnType, ListType]], range:KeyRange):Map[Key[ColumnType, ListType], ListType] = borrow { _.list(family, range) }
+ def list[ColumnType, ListType](query: IndexQuery): Map[StandardKey, Seq[Column[StandardKey]]] = borrow { _.list(query) }
+
+ def list[ColumnType, ListType](query: IndexQuery, predicate: Predicate, consistency: Consistency): Map[StandardKey, Seq[Column[StandardKey]]] = borrow { _.list(query, predicate, consistency) }
+
def batch(ops:Seq[Operation], consistency:Consistency):Unit = borrow { _.batch(ops, consistency) }
def batch(ops:Seq[Operation]):Unit = borrow { _.batch(ops) }
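For reference, a minimal pool setup looks roughly like the sketch below; it is illustrative rather than part of the patch, and it reuses the host and pool parameters from the (now removed) CassandraTestPool further down along with the "Test"/"Standard" schema from the tests.

import com.shorrockin.cascal.session._
import com.shorrockin.cascal.utils.Conversions._

// a small pool against a single local node; every helper below borrows a
// session from the pool and returns it when the call completes
val hosts  = Host("localhost", 9160, 250) :: Nil
val params = new PoolParams(10, ExhaustionPolicy.Fail, 500L, 6, 2)
val pool   = new SessionPool(hosts, params, Consistency.One)

pool.borrow { session => session.insert("Test" \ "Standard" \ "pooled" \ "name" \ "value") }
val columns = pool.count("Test" \ "Standard" \ "pooled")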
diff --git a/src/main/scala/com/shorrockin/cascal/session/SessionTemplate.scala b/src/main/scala/com/shorrockin/cascal/session/SessionTemplate.scala
index d4a5baa..65d91a6 100755
--- a/src/main/scala/com/shorrockin/cascal/session/SessionTemplate.scala
+++ b/src/main/scala/com/shorrockin/cascal/session/SessionTemplate.scala
@@ -1,6 +1,7 @@
package com.shorrockin.cascal.session
import com.shorrockin.cascal.model._
+import scala.collection.Map
/**
* session template describes the core function of a session. sub-classes of a template
@@ -11,6 +12,8 @@ import com.shorrockin.cascal.model._
*/
trait SessionTemplate {
+ val defaultConsistency:Consistency
+
/**
* return the current cluster name of the cassandra instance
*/
@@ -28,17 +31,18 @@ trait SessionTemplate {
*/
def keyspaces:Seq[String]
-
+ def truncate(cfname: String)
+
/**
* returns the column value for the specified column
*/
- def get[ResultType](col:Gettable[ResultType], consistency:Consistency):Option[ResultType]
+ def get[ResultType, ValueType](col:Gettable[ResultType, ValueType], consistency:Consistency):Option[ResultType]
/**
* returns the column value for the specified column, using the default consistency
*/
- def get[ResultType](col:Gettable[ResultType]):Option[ResultType]
+ def get[ResultType, ValueType](col:Gettable[ResultType, ValueType]):Option[ResultType]
/**
@@ -52,7 +56,9 @@ trait SessionTemplate {
*/
def insert[E](col:Column[E]):Column[E]
-
+
+ def add[E](col: CounterColumn[E], consistency: Consistency = defaultConsistency): CounterColumn[E]
+
/**
* counts the number of columns in the specified column container
*/
@@ -64,7 +70,9 @@ trait SessionTemplate {
*/
def count(container:ColumnContainer[_, _]):Int
-
+ def count[ColumnType, ResultType](containers: Seq[ColumnContainer[ColumnType, ResultType]], predicate:Predicate = EmptyPredicate, consistency: Consistency = defaultConsistency): Map[ColumnContainer[ColumnType, ResultType], Int]
+
+
/**
* removes the specified column container
*/
@@ -88,7 +96,12 @@ trait SessionTemplate {
*/
def remove(column:Column[_]):Unit
+ /**
+ * removes the specified counter column
+ */
+ def remove(column: CounterColumn[_], consistency: Consistency = defaultConsistency): Unit
+
/**
* performs a list of the provided standard key. uses the list of columns as the predicate
* to determine which columns to return.
@@ -150,7 +163,10 @@ trait SessionTemplate {
*/
def list[ColumnType, ListType](family:ColumnFamily[Key[ColumnType, ListType]], range:KeyRange):Map[Key[ColumnType, ListType], ListType]
-
+ def list[ColumnType, ListType](query: IndexQuery): Map[StandardKey, Seq[Column[StandardKey]]]
+
+ def list[ColumnType, ListType](query: IndexQuery, predicate: Predicate, consistency: Consistency): Map[StandardKey, Seq[Column[StandardKey]]]
+
/**
* performs the specified seq of operations in batch. assumes all operations belong
* to the same keyspace. If they do not then the first keyspace in the first operation
diff --git a/src/main/scala/com/shorrockin/cascal/test/EmbeddedPool.scala b/src/main/scala/com/shorrockin/cascal/test/EmbeddedPool.scala
new file mode 100644
index 0000000..1c438d6
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/test/EmbeddedPool.scala
@@ -0,0 +1,134 @@
+package com.shorrockin.cascal.test
+
+import org.apache.cassandra.thrift.CassandraDaemon
+import org.apache.cassandra.config.DatabaseDescriptor
+import java.io.File
+import scala.collection.JavaConversions._
+import java.util.{Collection => JCollection}
+import java.net.ConnectException
+import org.apache.thrift.transport.{TTransportException, TSocket}
+import com.shorrockin.cascal.session._
+import com.shorrockin.cascal.utils.{Utils, Logging}
+import org.apache.cassandra.config.{CFMetaData, KSMetaData}
+import java.util.concurrent.ExecutorService
+import java.util.concurrent.Executors
+
+/**
+ * trait which mixes in the functionality necessary to embed
+ * cassandra into a unit test
+ */
+trait EmbeddedPool extends Logging with Schema {
+
+ def borrow(f:(Session) => Unit) = {
+ init
+ EmbeddedCassandraServer.pool.borrow(f)
+ }
+
+ def init = {
+ EmbeddedCassandraServer.init(timeout, ksMetaData)
+ }
+
+ def shutdown = {
+ EmbeddedCassandraServer.shutdown
+ }
+}
+
+/**
+ * maintains the single instance of the Cassandra server
+ */
+object EmbeddedCassandraServer extends Logging {
+ import Utils._
+ var initialized = false
+
+ var pool: SessionPool = null
+ var daemon = new CassandraDaemonThread
+
+ def init(timeout: Int, ksMetaData: KSMetaData) = synchronized {
+ if (!initialized) {
+ val homeDirectory = new File("target/cassandra.home.unit-tests")
+ delete(homeDirectory)
+ homeDirectory.mkdirs
+
+ log.debug("creating cassandra instance at: " + homeDirectory.getCanonicalPath)
+ log.debug("copying cassandra configuration files to root directory")
+
+ val fileSep = System.getProperty("file.separator")
+ val storageFile = new File(homeDirectory, "cassandra.yaml")
+ val logFile = new File(homeDirectory, "log4j.properties")
+
+ replace(copy(resource("/cassandra.yaml"), storageFile), ("%temp-dir%" -> (homeDirectory.getCanonicalPath + fileSep)))
+ copy(resource("/log4j.properties"), logFile)
+
+ System.setProperty("cassandra.config", toURI(homeDirectory.getCanonicalPath + fileSep + "cassandra.yaml").toString)
+ System.setProperty("log4j.configuration", toURI(homeDirectory.getCanonicalPath + fileSep + "log4j.properties").toString);
+ System.setProperty("cassandra-foreground","true");
+
+ log.debug("creating data file and log location directories")
+ DatabaseDescriptor.getAllDataFileLocations.foreach { (file) => new File(file).mkdirs }
+
+ loadSchema(ksMetaData)
+
+ daemon.start
+
+ // try to make sockets until the server opens up - there has to be a better
+ // way - just not sure what it is.
+ val socket = new TSocket("localhost", DatabaseDescriptor.getRpcPort)
+ var opened = false
+ while (!opened) {
+ try {
+ socket.open()
+ opened = true
+ socket.close()
+ } catch {
+ case e:TTransportException => /* ignore */
+ case e:ConnectException => /* ignore */
+ }
+ }
+
+ val hosts = Host("localhost", DatabaseDescriptor.getRpcPort, timeout) :: Nil
+ val params = new PoolParams(10, ExhaustionPolicy.Fail, timeout + 100, 6, 2)
+ pool = new SessionPool(hosts, params, Consistency.One)
+
+ initialized = true
+ }
+ }
+
+ private def resource(str:String) = classOf[EmbeddedPool].getResourceAsStream(str)
+
+ private def loadSchema(ksMetaData: KSMetaData) = {
+ val ksList = new java.util.ArrayList[KSMetaData]()
+ ksList.add(ksMetaData)
+ import org.apache.cassandra.config.Schema
+ Schema.instance.load(ksList)
+ }
+
+ def shutdown {
+ daemon.close
+ }
+}
+
+/**
+ * daemon thread used to start and stop cassandra
+ */
+class CassandraDaemonThread extends Thread("CassandraDaemonThread") with Logging {
+ private val daemon = new CassandraDaemon
+
+ /**
+ * starts the server and blocks until it has
+ * completed booting up.
+ */
+ override def run:Unit = {
+ log.debug("starting cassandra daemon")
+ daemon.activate
+ log.debug("Cassandra daemon started")
+ }
+
+ def close():Unit = {
+ log.debug("instructing cassandra deamon to shut down")
+ daemon.deactivate
+ log.debug("blocking on cassandra shutdown")
+ this.join
+ log.debug("cassandra shut down")
+ }
+}
+
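A test can mix this trait in directly. The sketch below is illustrative only (the bundled tests extend an EmbeddedCassandra base class, which presumably wires the same pieces together) and leans on the CascalSchema trait defined later in this diff.

import org.junit.{Assert, Test}
import Assert._
import com.shorrockin.cascal.CascalSchema
import com.shorrockin.cascal.test.EmbeddedPool
import com.shorrockin.cascal.utils.Conversions._

// CascalSchema supplies the keyspace and column family metadata; EmbeddedPool boots
// the in-process Cassandra on the first borrow and hands out pooled sessions
class EmbeddedRoundTripTest extends EmbeddedPool with CascalSchema {
  @Test def insertAndCount = borrow { session =>
    session.insert("Test" \ "Standard" \ "embedded" \ "name" \ "value")
    assertEquals(1, session.count("Test" \ "Standard" \ "embedded"))
  }
}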
diff --git a/src/main/scala/com/shorrockin/cascal/test/Schema.scala b/src/main/scala/com/shorrockin/cascal/test/Schema.scala
new file mode 100644
index 0000000..f3ec516
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/test/Schema.scala
@@ -0,0 +1,26 @@
+package com.shorrockin.cascal.test
+
+import scala.collection.JavaConversions._
+import org.apache.cassandra.config.{CFMetaData, DatabaseDescriptor, KSMetaData}
+import org.apache.cassandra.db.marshal.{AbstractType, BytesType, TimeUUIDType}
+import org.apache.cassandra.db.ColumnFamilyType
+import org.apache.cassandra.locator.SimpleStrategy
+import java.nio.ByteBuffer
+import org.apache.cassandra.config.ColumnDefinition
+import org.apache.cassandra.thrift.IndexType
+import com.shorrockin.cascal.utils.Conversions.byteBuffer
+import org.apache.cassandra.db.marshal.LongType
+import org.apache.cassandra.locator.AbstractReplicationStrategy
+
+trait Schema {
+ val timeout: Int
+ val keyspace: String
+ val strategyClass: Class[_ <: AbstractReplicationStrategy]
+ val cfMetaDatas: Seq[CFMetaData]
+
+ lazy val ksMetaData = KSMetaData.testMetadataNotDurable(keyspace, strategyClass, Map("replication_factor" -> "1"), cfMetaDatas:_*)
+
+ def cfMetaData(name: String, cfType: ColumnFamilyType, colType: AbstractType[_]) = {
+ new CFMetaData(keyspace, name, cfType, colType, null);
+ }
+}
\ No newline at end of file
diff --git a/src/main/scala/com/shorrockin/cascal/testing/CassandraTestPool.scala b/src/main/scala/com/shorrockin/cascal/testing/CassandraTestPool.scala
deleted file mode 100644
index c59c50d..0000000
--- a/src/main/scala/com/shorrockin/cascal/testing/CassandraTestPool.scala
+++ /dev/null
@@ -1,112 +0,0 @@
-// TODO Need to update this for Cassandra 0.7
-
-package com.shorrockin.cascal.testing
-
-import org.apache.cassandra.thrift.CassandraDaemon
-import org.apache.cassandra.config.DatabaseDescriptor
-import java.io.File
-import java.net.ConnectException
-import org.apache.thrift.transport.{TTransportException, TSocket}
-import com.shorrockin.cascal.session._
-import com.shorrockin.cascal.utils.{Utils, Logging}
-/**
- * trait which mixes in the functionality necessary to embed
- * cassandra into a unit test
- */
-trait CassandraTestPool extends Logging {
- def borrow(f:(Session) => Unit) = {
- EmbeddedTestCassandra.init
- EmbeddedTestCassandra.pool.borrow(f)
- }
-}
-
-/**
- * maintains the single instance of the cassandra server
- */
-object EmbeddedTestCassandra extends Logging {
- import Utils._
- var initialized = false
-
- val hosts = Host("localhost", 9160, 250) :: Nil
- val params = new PoolParams(10, ExhaustionPolicy.Fail, 500L, 6, 2)
- lazy val pool = new SessionPool(hosts, params, Consistency.One)
-
- def init = synchronized {
- if (!initialized) {
- val homeDirectory = new File("target/cassandra.home.unit-tests")
- delete(homeDirectory)
- homeDirectory.mkdirs
-
- log.debug("creating cassandra instance at: " + homeDirectory.getCanonicalPath)
- log.debug("copying cassandra configuration files to root directory")
-
- val fileSep = System.getProperty("file.separator")
- val storageFile = new File(homeDirectory, "storage-conf.xml")
- val logFile = new File(homeDirectory, "log4j.properties")
-
- replace(copy(resource("/storage-conf.xml"), storageFile), ("%temp-dir%" -> (homeDirectory.getCanonicalPath + fileSep)))
- copy(resource("/log4j.properties"), logFile)
-
- System.setProperty("storage-config", homeDirectory.getCanonicalPath)
-
- log.debug("creating data file and log location directories")
- DatabaseDescriptor.getAllDataFileLocations.foreach { (file) => new File(file).mkdirs }
- // new File(DatabaseDescriptor.getLogFileLocation).mkdirs
-
- val daemon = new CassandraDaemonThread
- daemon.start
-
- // try to make sockets until the server opens up - there has to be a better
- // way - just not sure what it is.
- val socket = new TSocket("localhost", 9160);
- var opened = false
- while (!opened) {
- try {
- socket.open()
- opened = true
- socket.close()
- } catch {
- case e:TTransportException => /* ignore */
- case e:ConnectException => /* ignore */
- }
- }
-
- initialized = true
- }
- }
-
- private def resource(str:String) = classOf[CassandraTestPool].getResourceAsStream(str)
-}
-
-/**
- * daemon thread used to start and stop cassandra
- */
-class CassandraDaemonThread extends Thread("CassandraDaemonThread") with Logging {
- private val daemon = new CassandraDaemon
-
- setDaemon(true)
-
- /**
- * starts the server and blocks until it has
- * completed booting up.
- */
- def startServer = {
-
- }
-
- override def run:Unit = {
- log.debug("initializing cassandra daemon")
- daemon.init(new Array[String](0))
- log.debug("starting cassandra daemon")
- daemon.start
- }
-
- def close():Unit = {
- log.debug("instructing cassandra deamon to shut down")
- daemon.stop
- log.debug("blocking on cassandra shutdown")
- this.join
- log.debug("cassandra shut down")
- }
-}
-
diff --git a/src/main/scala/com/shorrockin/cascal/utils/Conversions.scala b/src/main/scala/com/shorrockin/cascal/utils/Conversions.scala
index 0655d78..f2c01c8 100644
--- a/src/main/scala/com/shorrockin/cascal/utils/Conversions.scala
+++ b/src/main/scala/com/shorrockin/cascal/utils/Conversions.scala
@@ -1,5 +1,6 @@
package com.shorrockin.cascal.utils
+import scala.language.implicitConversions
import java.nio.charset.Charset
import com.shorrockin.cascal.model.{Column, Keyspace}
import java.util.{Date, UUID => JavaUUID}
@@ -12,38 +13,38 @@ import java.nio.ByteBuffer
object Conversions {
val utf8 = Charset.forName("UTF-8")
- implicit def keyspace(str:String) = new Keyspace(str)
+ implicit def stringToKeyspace(str:String) = new Keyspace(str)
- implicit def bytes(date:Date):ByteBuffer = DateSerializer.toBytes(date)
- implicit def date(bytes:ByteBuffer):Date = DateSerializer.fromBytes(bytes)
+ implicit def byteBuffer(date:Date):ByteBuffer = DateSerializer.toByteBuffer(date)
+ implicit def date(bytes:ByteBuffer):Date = DateSerializer.fromByteBuffer(bytes)
implicit def string(date:Date):String = DateSerializer.toString(date)
- implicit def bytes(b:Boolean):ByteBuffer = BooleanSerializer.toBytes(b)
- implicit def boolean(bytes:ByteBuffer):Boolean = BooleanSerializer.fromBytes(bytes)
+ implicit def byteBuffer(b:Boolean):ByteBuffer = BooleanSerializer.toByteBuffer(b)
+ implicit def boolean(bytes:ByteBuffer):Boolean = BooleanSerializer.fromByteBuffer(bytes)
implicit def string(b:Boolean):String = BooleanSerializer.toString(b)
- implicit def bytes(b:Float):ByteBuffer = FloatSerializer.toBytes(b)
- implicit def float(bytes:ByteBuffer):Float = FloatSerializer.fromBytes(bytes)
+ implicit def byteBuffer(b:Float):ByteBuffer = FloatSerializer.toByteBuffer(b)
+ implicit def float(bytes:ByteBuffer):Float = FloatSerializer.fromByteBuffer(bytes)
implicit def string(b:Float):String = FloatSerializer.toString(b)
- implicit def bytes(b:Double):ByteBuffer = DoubleSerializer.toBytes(b)
- implicit def double(bytes:ByteBuffer):Double = DoubleSerializer.fromBytes(bytes)
+ implicit def byteBuffer(b:Double):ByteBuffer = DoubleSerializer.toByteBuffer(b)
+ implicit def double(bytes:ByteBuffer):Double = DoubleSerializer.fromByteBuffer(bytes)
implicit def string(b:Double):String = DoubleSerializer.toString(b)
- implicit def bytes(l:Long):ByteBuffer = LongSerializer.toBytes(l)
- implicit def long(bytes:ByteBuffer):Long = LongSerializer.fromBytes(bytes)
+ implicit def byteBuffer(l:Long):ByteBuffer = LongSerializer.toByteBuffer(l)
+ implicit def long(bytes:ByteBuffer):Long = LongSerializer.fromByteBuffer(bytes)
implicit def string(l:Long):String = LongSerializer.toString(l)
- implicit def bytes(i:Int):ByteBuffer = IntSerializer.toBytes(i)
- implicit def int(bytes:ByteBuffer):Int = IntSerializer.fromBytes(bytes)
+ implicit def byteBuffer(i:Int):ByteBuffer = IntSerializer.toByteBuffer(i)
+ implicit def int(bytes:ByteBuffer):Int = IntSerializer.fromByteBuffer(bytes)
implicit def string(i:Int) = IntSerializer.toString(i)
- implicit def bytes(str:String):ByteBuffer = StringSerializer.toBytes(str)
- implicit def string(bytes:ByteBuffer):String = StringSerializer.fromBytes(bytes)
+ implicit def byteBuffer(str:String):ByteBuffer = StringSerializer.toByteBuffer(str)
+ implicit def string(bytes:ByteBuffer):String = StringSerializer.fromByteBuffer(bytes)
implicit def string(source:JavaUUID) = UUIDSerializer.toString(source)
implicit def uuid(source:String) = UUIDSerializer.fromString(source)
- implicit def bytes(source:JavaUUID):ByteBuffer = UUIDSerializer.toBytes(source)
+ implicit def byteBuffer(source:JavaUUID):ByteBuffer = UUIDSerializer.toByteBuffer(source)
implicit def string(col:Column[_]):String = {
"%s -> %s (time: %s)".format(Conversions.string(col.name),
@@ -51,6 +52,18 @@ object Conversions {
col.time)
}
- implicit def toSeqBytes(values:Seq[String]) = values.map { (s) => Conversions.bytes(s) }
+ implicit def toSeqBytes(values:Seq[String]) = values.map { (s) => Conversions.byteBuffer(s) }
implicit def toJavaList[T](l: Seq[T]):java.util.List[T] = l.foldLeft(new java.util.ArrayList[T](l.size)){(al, e) => al.add(e); al}
+
+ implicit def byteBuffer[T1: Manifest, T2: Manifest](tuple: Tuple2[T1, T2]):ByteBuffer = Tuple2Serializer.toByteBuffer(tuple)
+ implicit def tuple[T1, T2](bytes:ByteBuffer)(implicit mf1: Manifest[T1], mf2: Manifest[T2]):Tuple2[T1, T2] = Tuple2Serializer.fromByteBuffer[T1, T2](bytes, mf1, mf2)
+
+ implicit def byteBuffer[T1: Manifest, T2: Manifest, T3: Manifest](tuple: Tuple3[T1, T2, T3]):ByteBuffer = Tuple3Serializer.toByteBuffer(tuple)
+ implicit def tuple[T1, T2, T3](bytes:ByteBuffer)(implicit mf1: Manifest[T1], mf2: Manifest[T2], mf3: Manifest[T3]):Tuple3[T1, T2, T3] = Tuple3Serializer.fromByteBuffer[T1, T2, T3](bytes, mf1, mf2, mf3)
+
+ implicit def byteBuffer[T1: Manifest, T2: Manifest, T3: Manifest, T4: Manifest](tuple: Tuple4[T1, T2, T3, T4]):ByteBuffer = Tuple4Serializer.toByteBuffer(tuple)
+ implicit def tuple[T1, T2, T3, T4](bytes:ByteBuffer)(implicit mf1: Manifest[T1], mf2: Manifest[T2], mf3: Manifest[T3], mf4: Manifest[T4]):Tuple4[T1, T2, T3, T4] = Tuple4Serializer.fromByteBuffer[T1, T2, T3, T4](bytes, mf1, mf2, mf3, mf4)
+
+ implicit def byteBuffer[T1: Manifest, T2: Manifest, T3: Manifest, T4: Manifest, T5: Manifest](tuple: Tuple5[T1, T2, T3, T4, T5]):ByteBuffer = Tuple5Serializer.toByteBuffer(tuple)
+ implicit def tuple[T1, T2, T3, T4, T5](bytes:ByteBuffer)(implicit mf1: Manifest[T1], mf2: Manifest[T2], mf3: Manifest[T3], mf4: Manifest[T4], mf5: Manifest[T5]):Tuple5[T1, T2, T3, T4, T5] = Tuple5Serializer.fromByteBuffer[T1, T2, T3, T4, T5](bytes, mf1, mf2, mf3, mf4, mf5)
}
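A quick sketch (not part of the patch) of the new tuple conversions: a Tuple2 is written to a composite ByteBuffer and read back with the generic tuple conversion, mirroring what CompositeTest below does with composite column names.

import java.nio.ByteBuffer
import com.shorrockin.cascal.utils.Conversions._

// Tuple2[String, Int] -> composite ByteBuffer -> Tuple2[String, Int]
val buf: ByteBuffer = byteBuffer(("composite name", 1))
val back: (String, Int) = tuple[String, Int](buf)
assert(back == ("composite name", 1))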
diff --git a/src/main/scala/com/shorrockin/cascal/utils/ThreadLocal.scala b/src/main/scala/com/shorrockin/cascal/utils/ThreadLocal.scala
new file mode 100644
index 0000000..ebf1e47
--- /dev/null
+++ b/src/main/scala/com/shorrockin/cascal/utils/ThreadLocal.scala
@@ -0,0 +1,11 @@
+package com.shorrockin.cascal.utils
+
+/**
+ * utility class to make java.lang.ThreadLocal easier to use
+ * courtesy of mehack (http://mehack.com/)
+ */
+class ThreadLocal[T](init: => T) extends java.lang.ThreadLocal[T] with Function0[T] {
+ override def initialValue:T = init
+ def apply = get
+ def withValue[S](thunk:(T => S)):S = thunk(get)
+}
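A tiny usage sketch (not part of the patch): the by-name init argument is evaluated once per thread, and withValue applies a function to the thread's value, which suits non-thread-safe helpers such as SimpleDateFormat.

import java.text.SimpleDateFormat
import java.util.Date
import com.shorrockin.cascal.utils.ThreadLocal

// one SimpleDateFormat per thread, created lazily the first time each thread asks
val formatter = new ThreadLocal(new SimpleDateFormat("yyyy-MM-dd"))
val today = formatter.withValue { _.format(new Date) }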
diff --git a/src/main/scala/com/shorrockin/cascal/utils/UUID.scala b/src/main/scala/com/shorrockin/cascal/utils/UUID.scala
index eaff6ab..01425e7 100644
--- a/src/main/scala/com/shorrockin/cascal/utils/UUID.scala
+++ b/src/main/scala/com/shorrockin/cascal/utils/UUID.scala
@@ -2,6 +2,7 @@ package com.shorrockin.cascal.utils
import java.util.{UUID => JavaUUID}
import _root_.com.eaio.uuid.{UUID => EaioUUID}
+import java.nio.ByteBuffer
/**
* utility method for working with, and creating uuids, suitable in a way
@@ -17,6 +18,9 @@ object UUID {
*/
def apply() = JavaUUID.fromString(new EaioUUID().toString());
+ def apply(data: ByteBuffer):JavaUUID = {
+ apply(data.array)
+ }
/**
* returns a new uuid based on the specified string
*/
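The new ByteBuffer overload pairs with the existing Conversions.byteBuffer(uuid) implicit; the sketch below is illustrative only and assumes an array-backed buffer, which is what data.array requires.

import com.shorrockin.cascal.utils.{Conversions, UUID}

// time-based UUID -> ByteBuffer -> UUID again, round-tripping through the new apply
val id     = UUID()
val copied = UUID(Conversions.byteBuffer(id))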
diff --git a/src/main/scala/com/shorrockin/cascal/utils/Utils.scala b/src/main/scala/com/shorrockin/cascal/utils/Utils.scala
index 298571d..b1e529a 100755
--- a/src/main/scala/com/shorrockin/cascal/utils/Utils.scala
+++ b/src/main/scala/com/shorrockin/cascal/utils/Utils.scala
@@ -1,8 +1,10 @@
package com.shorrockin.cascal.utils
-import _root_.scala.io.Source
-import java.io.{FileWriter, InputStream, FileOutputStream, File}
+import scala.language.reflectiveCalls
+import _root_.scala.io.Source
+import java.io.{FileWriter, InputStream, FileOutputStream, File}
import java.util.concurrent.TimeUnit
+import java.net.URI
/**
* common utility functions that don't fit elsewhere.
@@ -40,13 +42,16 @@ object Utils extends Logging {
len = is.read(buf, 0, buf.length)
if (-1 != len) out.write(buf, 0, len)
}
- out.flush
}
file
}
-
+ def toURI(fileName: String): URI = {
+ val file = new File(fileName)
+ file.toURI
+ }
+
/**
* replaces all instances of the specified token with the specified replacement
* in the source file.
@@ -58,9 +63,10 @@ object Utils extends Logging {
current
}
+ val lineSeparator = System.getProperty("line.separator")
val writer = new FileWriter(file)
manage(writer) {
- contents.foreach { writer.write(_) }
+ contents.foreach {x: String => writer.write(x + lineSeparator) }
writer.flush
}
diff --git a/src/test/resources/cassandra.yaml b/src/test/resources/cassandra.yaml
new file mode 100644
index 0000000..bef0841
--- /dev/null
+++ b/src/test/resources/cassandra.yaml
@@ -0,0 +1,560 @@
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: 'Test Cluster'
+
+# You should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node. If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, hints will be dropped.
+max_hint_window_in_ms: 3600000 # one hour
+# Sleep this long after delivering each hint
+hinted_handoff_throttle_delay_in_ms: 1
+
+# The following setting populates the page cache on memtable flush and compaction
+# WARNING: Enable this setting only when the whole node's data fits in memory.
+# Defaults to: false
+# populate_io_cache_on_flush: false
+
+# authentication backend, implementing IAuthenticator; used to identify users
+authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
+
+# authorization backend, implementing IAuthority; used to limit access/provide permissions
+authority: org.apache.cassandra.auth.AllowAllAuthority
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster. Any IPartitioner may be used, including your
+# own as long as it is on the classpath. Out of the box, Cassandra
+# provides org.apache.cassandra.dht.RandomPartitioner
+# org.apache.cassandra.dht.ByteOrderedPartitioner,
+# org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
+# and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
+# (deprecated).
+#
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+# When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
+# scanning rows in key order, but the ordering can generate hot spots
+# for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
+# - keys in a less-efficient format and only works with keys that are
+# UTF8-encoded Strings.
+# - CollatingOPP collates according to EN,US rules rather than lexical byte
+# ordering. Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.OrderPreservingPartitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+ - %temp-dir%data
+
+# commit log
+commitlog_directory: %temp-dir%commitlog
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must store the whole values of
+# its rows, so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint. And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
+# saved caches
+saved_caches_directory: %temp-dir%saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# Configure the Size of the individual Commitlog file. The
+# default is 128 MB, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 16 MB
+# is reasonable.
+#
+# commitlog_segment_size_in_mb: 128
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: ",,"
+ - seeds: "127.0.0.1"
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_. Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for memtables. Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs.
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the amount of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the amount of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread. At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSD:s; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+storage_port: 7002
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+ssl_storage_port: 7003
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: localhost
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+#
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: localhost
+# port for Thrift to listen for clients on
+rpc_port: 9162
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three options for the RPC Server:
+#
+# sync -> One connection per thread in the rpc pool (see below).
+# For a very large number of clients, memory will be your limiting
+# factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
+# Connection pooling is very, very strongly recommended.
+#
+# async -> Nonblocking server implementation with one thread to serve
+# rpc connections. This is not recommended for high throughput use
+# cases. Async has been tested to be about 50% slower than sync
+# or hsha and is deprecated: it will be removed in the next major release.
+#
+# hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool
+# (see below) is used to manage requests, but the threads are multiplexed
+# across the different clients.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max|thread to set request pool size.
+# You would primarily set max for the sync server to safeguard against
+# misbehaved clients; if you do hit the max, Cassandra will block until one
+# disconnects before accepting more. The defaults for sync are min of 16 and max
+# unlimited.
+#
+# For the Hsha server, the min and max both default to quadruple the number of
+# CPU cores.
+#
+# This configuration is ignored by the async server.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+# 0 disables TFramedTransport in favor of TSocket. This option
+# is deprecated; we strongly recommend using Framed mode.
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns. The competing causes are, Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory. Larger rows will spill
+# over to disk and use a slower two-pass compaction process. A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# This setting has no effect on LeveledCompactionStrategy.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable. Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
+# Time to wait for a reply from other nodes before failing the command
+rpc_timeout_in_ms: 10000
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming an important amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which never timeout streams.
+# streaming_socket_timeout_in_ms: 0
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+# - SimpleSnitch:
+# Treats Strategy order as proximity. This improves cache locality
+# when disabling read repair, which can further improve throughput.
+# Only appropriate for single-datacenter deployments.
+# - PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+# - GossipingPropertyFileSnitch
+# The rack and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via gossip. If
+# cassandra-topology.properties exists, it is used as a fallback, allowing
+# migration from the PropertyFileSnitch.
+# - RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's
+# IP address, respectively. Unless this happens to match your
+# deployment conventions (as it did Facebook's), this is best used
+# as an example of writing a custom Snitch class.
+# - Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the Datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+# - Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+# - throttle_limit -- The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# - default_weight -- default_weight is optional and allows for
+# overriding the default which is 1.
+# - weights -- Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time. The larger the interval,
+# the smaller and less effective the sampling will be. In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample. All the sampled entries
+# must fit in memory. Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs. This value is not often changed, however if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
diff --git a/src/test/resources/log4j.properties b/src/test/resources/log4j.properties
index 7053419..faf0918 100755
--- a/src/test/resources/log4j.properties
+++ b/src/test/resources/log4j.properties
@@ -1,10 +1,10 @@
-log4j.rootLogger=DEBUG, stdout
+log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%5p %d [%c %t] %m%n
# disable cassandra logging for test cases
-log4j.logger.org.apache.cassandra=DEBUG
-log4j.logger.org.apache.cassandra.service=DEBUG
-log4j.logger.org.apache.cassandra.thrift=DEBUG
\ No newline at end of file
+log4j.logger.org.apache.cassandra=INFO
+log4j.logger.org.apache.cassandra.service=INFO
+log4j.logger.org.apache.cassandra.thrift=INFO
\ No newline at end of file
diff --git a/src/test/resources/storage-conf.xml b/src/test/resources/storage-conf.xml
deleted file mode 100755
index ecf9bdc..0000000
--- a/src/test/resources/storage-conf.xml
+++ /dev/null
@@ -1,301 +0,0 @@
-[storage-conf.xml removed: the pre-0.7 XML test configuration for the embedded cluster
- ("Test Cluster", RackUnawareStrategy with replication factor 1, AllowAllAuthenticator,
- OrderPreservingPartitioner, commit log and data directories under %temp-dir%, seed 127.0.0.1,
- listen on localhost:7000, Thrift on localhost:9160, periodic commit log sync every 10000 ms);
- superseded by the cassandra.yaml added above.]
diff --git a/src/test/scala/com/shorrockin/cascal/CascalSchema.scala b/src/test/scala/com/shorrockin/cascal/CascalSchema.scala
new file mode 100644
index 0000000..4d8cc99
--- /dev/null
+++ b/src/test/scala/com/shorrockin/cascal/CascalSchema.scala
@@ -0,0 +1,37 @@
+package com.shorrockin.cascal
+
+import scala.collection.JavaConversions._
+import org.apache.cassandra.config.{CFMetaData, DatabaseDescriptor, KSMetaData}
+import org.apache.cassandra.db.marshal.{AbstractType, BytesType, CompositeType, CounterColumnType, IntegerType, TimeUUIDType}
+import org.apache.cassandra.db.ColumnFamilyType
+import org.apache.cassandra.locator.SimpleStrategy
+import java.nio.ByteBuffer
+import org.apache.cassandra.config.ColumnDefinition
+import org.apache.cassandra.thrift.IndexType
+import com.shorrockin.cascal.utils.Conversions.byteBuffer
+import org.apache.cassandra.db.marshal.LongType
+import com.shorrockin.cascal.test.Schema
+
+trait CascalSchema extends Schema {
+ val keyspace = "Test"
+ val replicationFactor = 1
+ val timeout = 2000
+ val strategyClass = classOf[SimpleStrategy]
+
+ val colMetaData = Map[ByteBuffer, ColumnDefinition](
+ byteBuffer("column1") -> new ColumnDefinition("column1", BytesType.instance, IndexType.KEYS, null, "column1Indx", 0),
+ byteBuffer("longColumn") -> new ColumnDefinition("longColumn", LongType.instance, IndexType.KEYS, null, "longColumnIndx", 0))
+
+ val standardIndexedCf = cfMetaData("StandardIndexed", ColumnFamilyType.Standard, BytesType.instance)
+ standardIndexedCf.columnMetadata(colMetaData)
+
+ val cfMetaDatas = Seq(
+ cfMetaData("Standard", ColumnFamilyType.Standard, BytesType.instance),
+ cfMetaData("Super", ColumnFamilyType.Super, TimeUUIDType.instance),
+ cfMetaData("SuperBytes", ColumnFamilyType.Super, BytesType.instance),
+ cfMetaData("StandardCounter", ColumnFamilyType.Standard, BytesType.instance).replicateOnWrite(true).defaultValidator(CounterColumnType.instance),
+ cfMetaData("SuperCounter", ColumnFamilyType.Super, BytesType.instance).replicateOnWrite(true).defaultValidator(CounterColumnType.instance),
+ cfMetaData("Composite2", ColumnFamilyType.Standard, CompositeType.getInstance(List(BytesType.instance, IntegerType.instance))),
+ cfMetaData("Composite3", ColumnFamilyType.Standard, CompositeType.getInstance(List(BytesType.instance, LongType.instance, BytesType.instance))),
+ standardIndexedCf)
+}
\ No newline at end of file
diff --git a/src/test/scala/com/shorrockin/cascal/CompositeTest.scala b/src/test/scala/com/shorrockin/cascal/CompositeTest.scala
new file mode 100644
index 0000000..9811406
--- /dev/null
+++ b/src/test/scala/com/shorrockin/cascal/CompositeTest.scala
@@ -0,0 +1,79 @@
+package com.shorrockin.cascal
+
+import scala.language.implicitConversions
+import org.junit.{Assert, Test}
+import com.shorrockin.cascal.utils.Conversions._
+import com.shorrockin.cascal.serialization.TupleSerializer
+import Assert._
+import com.shorrockin.cascal.session.Insert
+import com.shorrockin.cascal.session.RangePredicate
+import com.shorrockin.cascal.session.Order
+
+class CompositeTest extends EmbeddedCassandra {
+
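+  // Column names in "Composite2"/"Composite3" are tuples stored as composite
+  // columns; a RangePredicate built from a partial tuple such as ("composite", None)
+  // matches every column whose first component starts with that prefix (see the
+  // range tests below).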
+ @Test def composite2InsertGet = borrow { session =>
+ val name = ("composite name", 1)
+ val col = "Test" \ "Composite2" \ "Insert Get" \ name
+ session.insert(col \ "composite value")
+
+    val colName = session.get(col).get.name
+    assertEquals(name, tuple[String, Int](colName))
+ }
+
+ @Test def composite3InsertGet = borrow { session =>
+ val name = ("composite name", 1L, "name part 3")
+ val col = "Test" \ "Composite3" \ "Insert Get" \ name
+ session.insert(col \ "composite value")
+
+    val colName = session.get(col).get.name
+    assertEquals(name, tuple[String, Long, String](colName))
+ }
+
+ @Test def composite2Range = borrow { session =>
+ val key = "Test" \ "Composite2" \ "Composite Range"
+ val col1 = key \ (("composite", 1)) \ 1
+ val col2 = key \ (("composite", 2)) \ 1
+ val col3 = key \ (("composite", 3)) \ 1
+ val col4 = key \ (("composite", 4)) \ 1
+ val col5 = key \ (("composite", 5)) \ 1
+ val col6 = key \ (("comcom", 5)) \ 1
+
+ session.batch(Insert(col1) :: Insert(col2) :: Insert(col3) :: Insert(col4) :: Insert(col5) :: Insert(col6))
+
+ val result1 = session.list(key, RangePredicate(Some(("c", None)), None, Order.Ascending, None))
+ assertEquals(6, result1.size)
+
+ val result2 = session.list(key, RangePredicate(Some(("composite", None)), None, Order.Ascending, None))
+ assertEquals(5, result2.size)
+
+ val result3 = session.list(key, RangePredicate(("composite", 2), ("composite", 4)))
+ assertEquals(3, result3.size)
+ assertEquals(col2, result3(0))
+ assertEquals(col3, result3(1))
+ assertEquals(col4, result3(2))
+ }
+
+ @Test def composite3Range = borrow { session =>
+ val key = "Test" \ "Composite3" \ "Composite Range"
+ val col1 = key \ (("composite", 1L, "A")) \ 1
+ val col2 = key \ (("composite", 1L, "a")) \ 1
+ val col3 = key \ (("composite", 10L, "A")) \ 1
+
+ session.batch(Insert(col1) :: Insert(col2) :: Insert(col3))
+
+ val result0 = session.list(key, RangePredicate(Some(("composite", None)), None, Order.Descending, None))
+ assertEquals(0, result0.size)
+
+ val result1 = session.list(key, RangePredicate(Some(("composite", None)), None, Order.Ascending, None))
+ assertEquals(3, result1.size)
+
+ val result2 = session.list(key, RangePredicate(("composite", 1L), ("composite", 2L)))
+ assertEquals(2, result2.size)
+ assertEquals(col1, result2(0))
+ assertEquals(col2, result2(1))
+
+ val result3 = session.list(key, RangePredicate(("composite", 1L, "B"), ("composite", 2L)))
+ assertEquals(1, result3.size)
+ assertEquals(col2, result3(0))
+ }
+}
\ No newline at end of file
diff --git a/src/test/scala/com/shorrockin/cascal/CounterCoulmnTest.scala b/src/test/scala/com/shorrockin/cascal/CounterCoulmnTest.scala
new file mode 100644
index 0000000..04c2858
--- /dev/null
+++ b/src/test/scala/com/shorrockin/cascal/CounterCoulmnTest.scala
@@ -0,0 +1,101 @@
+package com.shorrockin.cascal
+
+import org.junit.{Assert, Test}
+import com.shorrockin.cascal.session.KeyRange
+import com.shorrockin.cascal.session.EmptyPredicate
+import com.shorrockin.cascal.session.RangePredicate
+import com.shorrockin.cascal.session.ColumnPredicate
+import com.shorrockin.cascal.session.Add
+import com.shorrockin.cascal.session.Delete
+
+class CounterCoulmnTest extends EmbeddedCassandra {
+ import com.shorrockin.cascal.utils.Conversions._
+ import Assert._
+
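+  // "Test" \# "StandardCounter" addresses a counter column family; "col + n" and
+  // "col - n" build increment/decrement operations that are applied with
+  // session.add (or batched with Add).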
+ @Test def addAndGetCounter = borrow { session =>
+
+ val col = "Test" \# "StandardCounter" \ "Test" \ "col name"
+ assertTrue(session.get(col).isEmpty)
+
+ session.add(col + 1)
+ assertEquals(Some(1), session.get(col).get.value)
+
+ session.add(col + 10)
+ assertEquals(Some(11), session.get(col).get.value)
+
+ session.add(col - 1)
+ assertEquals(Some(10), session.get(col).get.value)
+ }
+
+ @Test def getCounterRowsUsingKeyRange = borrow { session =>
+ val cf = "Test" \# "StandardCounter"
+ session.add(cf \ "range1" \ "col1" + 1)
+ session.add(cf \ "range2" \ "col1" - 100)
+ session.add(cf \ "range3" \ "col1" + 23)
+
+ val results = session.list(cf, KeyRange("range1", "range3", 100))
+ assertEquals(3, results.size)
+ }
+
+ @Test def getCounterRowsUsingMultiGet = borrow { session =>
+ val key1 = "Test" \# "StandardCounter" \ "container1"
+ val key2 = "Test" \# "StandardCounter" \ "container2"
+ val key3 = "Test" \# "StandardCounter" \ "container3"
+
+ session.add(key1 \ "col1" + 1)
+ session.add(key2 \ "col1" - 100)
+ session.add(key3 \ "col1" + 23)
+
+ val results = session.list(key1 :: key2 :: key3 :: Nil)
+ assertEquals(3, results.size)
+ }
+
+ @Test def getKeyCounters = borrow { session =>
+ val key = "Test" \# "StandardCounter" \ "key counters"
+ session.add(key \ "col1" + 1)
+ session.add(key \ "col2" - 2)
+ session.add(key \ "col3" + 3)
+
+ val rangeResults = session.list(key, RangePredicate("col1", "col3"))
+ assertEquals(3, rangeResults.size)
+
+ val predicateResults = session.list(key, ColumnPredicate(List("col1", "col3")))
+ assertEquals(2, predicateResults.size)
+ }
+
+ @Test def removeCounterColumn = borrow { session =>
+ val col = "Test" \# "StandardCounter" \ "testremove" \ "col1" + 1
+ session.add(col)
+ assertEquals(Some(1), session.get(col).get.value)
+
+ session.remove(col)
+ assertEquals(None, session.get(col))
+ }
+
+ @Test def counterBatchAddAndDelete = borrow { session =>
+ val key = "Test" \# "StandardCounter" \ "test batch"
+ val col1 = key \ ("Column-1", + 10)
+ val col2 = key \ ("Column-2", + 2)
+ val col3 = key \ ("Column-3", - 500)
+
+ session.batch(Add(col1) :: Add(col2) :: Add(col3))
+ assertEquals(3, session.list(key).size)
+
+ session.batch(Delete(key, ColumnPredicate(col2.name :: col3.name :: Nil)) :: Nil)
+ assertEquals(1, session.list(key).size)
+ }
+
+ @Test def countCounterColumns = borrow { session =>
+ val key1 = "Test" \# "StandardCounter" \ "count columns key1"
+ session.add(key1 \ "col1" + 1)
+ session.add(key1 \ "col2" - 100)
+ session.add(key1 \ "col3" + 23)
+
+ val key2 = "Test" \# "StandardCounter" \ "count columns key2"
+ session.add(key2 \ "col1" + 1)
+
+ val results = session.count(key1 :: key2 :: Nil)
+ assertEquals(3, results(key1))
+ assertEquals(1, results(key2))
+ }
+}
diff --git a/src/test/scala/com/shorrockin/cascal/CounterSuperColumnTest.scala b/src/test/scala/com/shorrockin/cascal/CounterSuperColumnTest.scala
new file mode 100644
index 0000000..6a4c1a1
--- /dev/null
+++ b/src/test/scala/com/shorrockin/cascal/CounterSuperColumnTest.scala
@@ -0,0 +1,106 @@
+package com.shorrockin.cascal
+
+import org.junit.{Assert, Test}
+import com.shorrockin.cascal.session.KeyRange
+import com.shorrockin.cascal.session.RangePredicate
+import com.shorrockin.cascal.session.ColumnPredicate
+import com.shorrockin.cascal.session.Add
+import com.shorrockin.cascal.session.Delete
+
+class CounterSuperColumnTest extends EmbeddedCassandra {
+ import com.shorrockin.cascal.utils.Conversions._
+ import Assert._
+
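+  // "\\#" addresses a counter super column family: paths add a super column level
+  // (key \ "super" \ "col") before the same "+ n" / "- n" counter operations.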
+ @Test def addAndGetCounter = borrow { session =>
+
+ val col = "Test" \\# "SuperCounter" \ "Add and get key" \ "super" \ "col name"
+ assertTrue(session.get(col).isEmpty)
+
+ session.add(col + 1)
+ assertEquals(Some(1), session.get(col).get.value)
+
+ session.add(col + 10)
+ assertEquals(Some(11), session.get(col).get.value)
+
+ session.add(col - 1)
+ assertEquals(Some(10), session.get(col).get.value)
+ }
+
+ @Test def getCounterRowsUsingKeyRange = borrow { session =>
+ val cf = "Test" \\# "SuperCounter"
+ session.add(cf \ "range1" \ "super" \ "col1" + 1)
+ session.add(cf \ "range2" \ "super" \ "col1" - 100)
+ session.add(cf \ "range3" \ "super" \ "col1" + 23)
+
+ val results = session.list(cf, KeyRange("range1", "range3", 100))
+ assertEquals(3, results.size)
+ }
+
+ @Test def getCounterRowsUsingMultiGet = borrow { session =>
+ val key1 = "Test" \\# "SuperCounter" \ "container1"
+ val key2 = "Test" \\# "SuperCounter" \ "container2"
+ val key3 = "Test" \\# "SuperCounter" \ "container3"
+
+ session.add(key1 \ "super" \ "col1" + 1)
+ session.add(key2 \ "super" \ "col1" - 100)
+ session.add(key3 \ "super" \ "col1" + 23)
+
+ val results = session.list(key1 :: key2 :: key3 :: Nil)
+ assertEquals(3, results.size)
+ }
+
+ @Test def getKeyCounters = borrow { session =>
+ val key = "Test" \\# "SuperCounter" \ "key counters"
+ session.add(key \ "super1" \ "col1" + 1)
+ session.add(key \ "super2" \ "col2" - 2)
+ session.add(key \ "super3" \ "col3" + 3)
+
+ val rangeResults = session.list(key, RangePredicate("super1", "super3"))
+ assertEquals(3, rangeResults.size)
+
+ val predicateResults = session.list(key, ColumnPredicate(List("super1", "super2")))
+ assertEquals(2, predicateResults.size)
+ }
+
+ @Test def removeCounterColumn = borrow { session =>
+ val col = "Test" \\# "SuperCounter" \ "testremove" \ "super" \ "col1" + 1
+ session.add(col)
+ assertEquals(Some(1), session.get(col).get.value)
+
+ session.remove(col)
+ assertEquals(None, session.get(col))
+ }
+
+ @Test def counterBatchAddAndDelete = borrow { session =>
+ val key = "Test" \\# "SuperCounter" \ "test batch"
+ val col1 = key \ "super" \ ("Column-1", + 10)
+ val col2 = key \ "super" \ ("Column-2", + 2)
+ val col3 = key \ "super" \ ("Column-3", - 500)
+
+ session.batch(Add(col1) :: Add(col2) :: Add(col3))
+ assertEquals(3, session.list(key \ "super").size)
+
+ session.batch(Delete(key, ColumnPredicate(col2.name :: col3.name :: Nil)) :: Nil)
+ assertEquals(1, session.list(key).size)
+ }
+
+ @Test def countCounterColumns = borrow { session =>
+ val key1 = "Test" \\# "SuperCounter" \ "count columns key1"
+ val key1Super = key1 \ "super"
+ session.add(key1Super \ "col1" + 1)
+ session.add(key1Super \ "col2" - 100)
+ session.add(key1Super \ "col3" + 23)
+
+ val key2 = "Test" \\# "SuperCounter" \ "count columns key2"
+ val key2Super = key2 \ "super"
+ session.add(key2Super \ "col1" + 1)
+
+ val keyResults = session.count(key1 :: key2 :: Nil)
+ assertEquals(1, keyResults(key1))
+ assertEquals(1, keyResults(key2))
+
+ val superResults = session.count(key1Super :: key2Super :: Nil)
+ assertEquals(3, superResults(key1Super))
+ assertEquals(1, superResults(key2Super))
+ }
+}
\ No newline at end of file
diff --git a/src/test/scala/com/shorrockin/cascal/EmbeddedCassandra.scala b/src/test/scala/com/shorrockin/cascal/EmbeddedCassandra.scala
new file mode 100644
index 0000000..e5d5f98
--- /dev/null
+++ b/src/test/scala/com/shorrockin/cascal/EmbeddedCassandra.scala
@@ -0,0 +1,4 @@
+package com.shorrockin.cascal
+import com.shorrockin.cascal.test.EmbeddedPool
+
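+/** Base trait for the test suite: combines the embedded Cassandra pool (EmbeddedPool) with the shared test schema (CascalSchema). */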
+trait EmbeddedCassandra extends EmbeddedPool with CascalSchema
\ No newline at end of file
diff --git a/src/test/scala/com/shorrockin/cascal/InsertGetTest.scala b/src/test/scala/com/shorrockin/cascal/InsertGetTest.scala
new file mode 100644
index 0000000..be9ede0
--- /dev/null
+++ b/src/test/scala/com/shorrockin/cascal/InsertGetTest.scala
@@ -0,0 +1,19 @@
+package com.shorrockin.cascal
+
+import org.junit.{Assert, Test}
+import com.shorrockin.cascal.utils.Utils
+import com.shorrockin.cascal.utils.UUID
+
+class InsertGetTest extends EmbeddedCassandra {
+ import com.shorrockin.cascal.utils.Conversions._
+ import Assert._
+
+ @Test def testInsertGet = borrow { session =>
+
+ val col = "Test" \ "Standard" \ "Test" \ "col name"
+ assertTrue(session.get(col).isEmpty)
+
+ session.insert(col \ "col value")
+ assertEquals("col value", string(session.get(col).get.value))
+ }
+}
\ No newline at end of file
diff --git a/src/test/scala/com/shorrockin/cascal/TestBatchOperations.scala b/src/test/scala/com/shorrockin/cascal/TestBatchOperations.scala
index 1cb0002..9c7de7a 100644
--- a/src/test/scala/com/shorrockin/cascal/TestBatchOperations.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestBatchOperations.scala
@@ -1,12 +1,11 @@
package com.shorrockin.cascal
-import testing._
import model.Column
import org.junit.{Assert, Test}
import session.{ColumnPredicate, Delete, Insert}
import utils.{UUID, Conversions}
-class TestBatchOperations extends CassandraTestPool {
+class TestBatchOperations extends EmbeddedCassandra {
import Conversions._
import Assert._
@@ -50,17 +49,19 @@ class TestBatchOperations extends CassandraTestPool {
val col2 = key \ ("Column-2", "Value-2")
val col3 = key \ ("Column-3", "Value-3")
val col4 = key \ ("Column-4", "Value-4")
+ val col5 = key \ ("Column-5", "Value-5")
- s.batch(Insert(col1) :: Insert(col2) :: Insert(col3))
- assertEquals(3, s.list(key).size)
+ s.batch(Insert(col1) :: Insert(col2) :: Insert(col3) :: Insert(col4))
+ assertEquals(4, s.list(key).size)
- s.batch(Delete(key, ColumnPredicate(col2.name :: col3.name :: Nil)) :: Insert(col4))
+ s.batch(Delete(key, ColumnPredicate(col2.name :: col3.name :: Nil)) :: Delete(col4) :: Insert(col5))
assertEquals(2, s.list(key).size)
assertEquals(None, s.get(col2))
assertEquals(None, s.get(col3))
+ assertEquals(None, s.get(col4))
assertEquals("Value-1", string(s.get(col1).get.value))
- assertEquals("Value-4", string(s.get(col4).get.value))
+ assertEquals("Value-5", string(s.get(col5).get.value))
}
@Test def testBatchSuperDelete = borrow { (s) =>
diff --git a/src/test/scala/com/shorrockin/cascal/TestEmbeddedCassandra.scala b/src/test/scala/com/shorrockin/cascal/TestEmbeddedCassandra.scala
index 901ec84..b0ec9f2 100755
--- a/src/test/scala/com/shorrockin/cascal/TestEmbeddedCassandra.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestEmbeddedCassandra.scala
@@ -1,10 +1,9 @@
package com.shorrockin.cascal
-import testing._
import org.junit.{Assert, Test}
import utils.Conversions
-class TestEmbeddedCassandra extends CassandraTestPool {
+class TestEmbeddedCassandra extends EmbeddedCassandra {
import Assert._
import Conversions._
diff --git a/src/test/scala/com/shorrockin/cascal/TestInsertRemoveLoop.scala b/src/test/scala/com/shorrockin/cascal/TestInsertRemoveLoop.scala
index dad0b8d..6116a74 100755
--- a/src/test/scala/com/shorrockin/cascal/TestInsertRemoveLoop.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestInsertRemoveLoop.scala
@@ -1,6 +1,5 @@
package com.shorrockin.cascal
-import testing._
import org.junit.{Assert, Test}
import com.shorrockin.cascal.utils.Utils
@@ -8,7 +7,7 @@ import com.shorrockin.cascal.utils.Utils
* tests a looping insert remove. Stresses out the precision of
* system time.
*/
-class TestInsertRemoveLoop extends CassandraTestPool {
+class TestInsertRemoveLoop extends EmbeddedCassandra {
import com.shorrockin.cascal.utils.Conversions._
import Assert._
@@ -17,9 +16,10 @@ class TestInsertRemoveLoop extends CassandraTestPool {
def checkLowResolution = {
var onLowPrecisionSystem = false
for( i <- 1L to 100L ) {
+ val colName = "col" + i
session.remove("Test" \ "Standard" \ "Test")
- session.insert("Test" \ "Standard" \ "Test" \ (i, "hello:"+i))
- if( session.get("Test" \ "Standard" \ "Test" \ i) == None ) {
+ session.insert("Test" \ "Standard" \ "Test" \ (colName, "value:"+i))
+ if( session.get("Test" \ "Standard" \ "Test" \ colName) == None ) {
onLowPrecisionSystem = true
}
}
diff --git a/src/test/scala/com/shorrockin/cascal/TestKeyRangeList.scala b/src/test/scala/com/shorrockin/cascal/TestKeyRangeList.scala
index 65ff901..3d7e688 100644
--- a/src/test/scala/com/shorrockin/cascal/TestKeyRangeList.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestKeyRangeList.scala
@@ -1,12 +1,11 @@
package com.shorrockin.cascal
-import testing._
import model.{ColumnFamily, Key, StandardKey}
import org.junit.{Assert, Test}
import session.{Consistency, EmptyPredicate, KeyRange}
import utils.Conversions
-class TestKeyRangeList extends CassandraTestPool {
+class TestKeyRangeList extends EmbeddedCassandra {
import Assert._
import Conversions._
diff --git a/src/test/scala/com/shorrockin/cascal/TestMultiKeyList.scala b/src/test/scala/com/shorrockin/cascal/TestMultiKeyList.scala
index ae15cab..4c701b9 100755
--- a/src/test/scala/com/shorrockin/cascal/TestMultiKeyList.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestMultiKeyList.scala
@@ -1,7 +1,6 @@
package com.shorrockin.cascal
import model._
-import testing._
import org.junit.{Assert, Test}
import session.{Session, ColumnPredicate, Order, RangePredicate}
import utils.{UUID, Conversions}
@@ -10,7 +9,7 @@ import utils.{UUID, Conversions}
* tests our ability to list multiple keys, translates to a cassandra
* multiget_slice
*/
-class TestMultiKeyList extends CassandraTestPool {
+class TestMultiKeyList extends EmbeddedCassandra {
import Assert._
import Conversions._
@@ -56,7 +55,7 @@ class TestMultiKeyList extends CassandraTestPool {
assertEquals(2, locate(results, key3).size)
// try out a column predicate
- val columns = List(bytes("col-1"), bytes("col-2"))
+ val columns = List(byteBuffer("col-1"), byteBuffer("col-2"))
results = session.list(key1 :: key2 :: key3, ColumnPredicate(columns))
assertEquals(3, results.size)
assertEquals(2, locate(results, key1).size)
diff --git a/src/test/scala/com/shorrockin/cascal/TestPathComposition.scala b/src/test/scala/com/shorrockin/cascal/TestPathComposition.scala
index 52e9439..2b29212 100644
--- a/src/test/scala/com/shorrockin/cascal/TestPathComposition.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestPathComposition.scala
@@ -1,6 +1,5 @@
package com.shorrockin.cascal
-import testing._
import org.junit.{Assert, Test}
/**
diff --git a/src/test/scala/com/shorrockin/cascal/TestRemoval.scala b/src/test/scala/com/shorrockin/cascal/TestRemoval.scala
index 283a65a..14546ff 100644
--- a/src/test/scala/com/shorrockin/cascal/TestRemoval.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestRemoval.scala
@@ -1,13 +1,12 @@
package com.shorrockin.cascal
-import testing._
import org.junit.{Assert, Test}
import com.shorrockin.cascal.utils.UUID
/**
* tests session removal
*/
-class TestRemoval extends CassandraTestPool {
+class TestRemoval extends EmbeddedCassandra {
import com.shorrockin.cascal.utils.Conversions._
import Assert._
@@ -32,4 +31,13 @@ class TestRemoval extends CassandraTestPool {
assertEquals(None, s.get(std))
assertEquals(None, s.get(sup))
}
+
+ @Test def testTruncate = borrow { (s) =>
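+    // truncate removes every row in the column family, so the previously
+    // inserted column should no longer be readable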
+ val col = "Test" \ "Standard" \ UUID() \ "Column"
+    s.insert(col \ "Value")
+ assertEquals("Value", new String(s.get(col).get.value.array))
+ s.truncate("Standard")
+
+ assertEquals(None, s.get(col))
+ }
}
diff --git a/src/test/scala/com/shorrockin/cascal/TestSecondaryIndex.scala b/src/test/scala/com/shorrockin/cascal/TestSecondaryIndex.scala
new file mode 100644
index 0000000..e355b4c
--- /dev/null
+++ b/src/test/scala/com/shorrockin/cascal/TestSecondaryIndex.scala
@@ -0,0 +1,102 @@
+package com.shorrockin.cascal
+
+import org.junit.{Assert, Test}
+import com.shorrockin.cascal.utils.Conversions._
+import Assert._
+
+class TestSecondaryIndex extends EmbeddedCassandra {
+
+ val family = "Test" \ "StandardIndexed"
+ val key1 = family \ "key1"
+ val key2 = family \ "key2"
+
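+  // Secondary-index queries are built with the DSL
+  //   family where <col> Eq <v> [and <col> Gt|Gte|Lt|Lte <v>] startAt <key> [limit n]
+  // "column1" and "longColumn" carry KEYS indexes (see CascalSchema), and Cassandra
+  // requires at least one Eq clause on an indexed column.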
+ @Test def eqExpression = borrow { (session) =>
+
+ session.insert(key1 \ "column1" \ "b")
+ session.insert(key1 \ "column2" \ "c")
+
+ session.insert(key2 \ "column1" \ "b")
+
+ val query = family where "column1" Eq "b" startAt "a"
+
+ val rows = session.list(query)
+ assertEquals(2, rows.size)
+ }
+
+ @Test def eqExpressionForLongType = borrow { (session) =>
+
+ session.insert(key1 \ "longColumn" \ 1L)
+ session.insert(key2 \ "longColumn" \ 2L)
+
+ val query = family where "longColumn" Eq 1L startAt 0
+
+ val rows = session.list(query)
+ assertEquals(1, rows.size)
+ }
+
+ @Test def eqAndGtExpression = borrow { (session) =>
+
+ session.insert(key1 \ "column1" \ "b")
+ session.insert(key1 \ "column2" \ "c")
+
+ session.insert(key2 \ "column1" \ "b")
+ session.insert(key2 \ "column2" \ "b")
+
+ val query = family where "column1" Eq "b" and "column2" Gt "b" startAt "a"
+ val rows = session.list(query)
+ assertEquals(1, rows.size)
+ }
+
+ @Test def eqAndGteExpression = borrow { (session) =>
+
+ session.insert(key1 \ "column1" \ "b")
+ session.insert(key1 \ "column2" \ "c")
+
+ session.insert(key2 \ "column1" \ "b")
+ session.insert(key2 \ "column2" \ "b")
+
+ val query = family where "column1" Eq "b" and "column2" Gte "b" startAt "a"
+ val rows = session.list(query)
+ assertEquals(2, rows.size)
+ }
+
+ @Test def eqAndLtExpression = borrow { (session) =>
+
+ session.insert(key1 \ "column1" \ "b")
+ session.insert(key1 \ "column2" \ "a")
+
+ session.insert(key2 \ "column1" \ "b")
+ session.insert(key2 \ "column2" \ "b")
+
+ val query = family where "column1" Eq "b" and "column2" Lt "b" startAt "a"
+ val rows = session.list(query)
+ assertEquals(1, rows.size)
+ }
+
+ @Test def eqAndLteExpression = borrow { (session) =>
+
+ session.insert(key1 \ "column1" \ "b")
+ session.insert(key1 \ "column2" \ "a")
+
+ session.insert(key2 \ "column1" \ "b")
+ session.insert(key2 \ "column2" \ "b")
+
+ val query = family where "column1" Eq "b" and "column2" Lte "b" startAt "a"
+ val rows = session.list(query)
+ assertEquals(2, rows.size)
+ }
+
+ @Test def limitNumberOfResult = borrow { (session) =>
+
+ session.insert(key1 \ "column1" \ "b")
+ session.insert(key1 \ "column2" \ "a")
+
+ session.insert(key2 \ "column1" \ "b")
+ session.insert(key2 \ "column2" \ "b")
+
+ val query = family where "column1" Eq "b" and "column2" Lte "b" startAt "a" limit 1
+ val rows = session.list(query)
+ assertEquals(1, rows.size)
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/scala/com/shorrockin/cascal/TestSerialization.scala b/src/test/scala/com/shorrockin/cascal/TestSerialization.scala
index d0794f2..bc2f6be 100755
--- a/src/test/scala/com/shorrockin/cascal/TestSerialization.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestSerialization.scala
@@ -86,29 +86,35 @@ class TestSerialization {
assertEquals("Bar", objects(1).value.get)
}
- @Test def testCanConvertFromColumnsToMappedSuper() {
+ @Test def convertFromColumnsToMappedSuper() {
val now = new Date
val key = "Test" \\ "Super" \ "Hello"
val sc = key \ "Super Column Value"
val colb = sc \ "Column-B" \ now
val colc = sc \ "Column-C" \ 12L
- val cold = sc \ "Column-D" \ 13L
val obj = Converter[MappedSuper](colc :: colb)
assertEquals("Hello", obj.a)
assertEquals("Super Column Value", obj.s)
assertEquals(now, obj.b)
- assertEquals(12L, obj.c)
+ assertEquals(12L, obj.c)
+ }
- val obj2 = Converter[MappedSuperWithCols](colc :: cold)
- assertEquals("Hello", obj.a)
- assertEquals("Super Column Value", obj.s)
+ @Test def convertFromColumnsToMappedSuperWithCols() {
+ val now = new Date
+ val key = "Test" \\ "Super" \ "Hello"
+ val sc = key \ "Super Column Value"
+ val cold = sc \ "Column-C" \ 12L
+ val cole = sc \ "Column-D" \ 13L
+
+ val obj2 = Converter[MappedSuperWithCols](cold :: cole)
+ assertEquals("Hello", obj2.a)
+ assertEquals("Super Column Value", obj2.s)
assertTrue(obj2.values.contains("Column-C"));
assertTrue(obj2.values.contains("Column-D"));
}
-
-
+
@Test def testCanConvertObjectToStandardColumnList() {
}
diff --git a/src/test/scala/com/shorrockin/cascal/TestSessionPool.scala b/src/test/scala/com/shorrockin/cascal/TestSessionPool.scala
index 8c055a6..13ea95e 100644
--- a/src/test/scala/com/shorrockin/cascal/TestSessionPool.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestSessionPool.scala
@@ -1,48 +1,48 @@
package com.shorrockin.cascal
-import testing._
import session._
import utils.{UUID, Conversions}
import org.junit.{Assert, Test}
+import com.shorrockin.cascal.test.EmbeddedCassandraServer
-class TestSessionPool {
+class TestSessionPool extends EmbeddedCassandra {
import Conversions._
import Assert._
- @Test def testSessionPool = {
- EmbeddedTestCassandra.init
-
- val hosts = Host("localhost", 9160, 250) :: Host("localhost", 9161, 1)
- val params = new PoolParams(10, ExhaustionPolicy.Fail, 500L, 6, 2)
- val pool = new SessionPool(hosts, params, Consistency.One)
-
- // as long as no exceptions were thrown we passed
- (0 until 10).foreach { index =>
- pool.borrow { _.count("Test" \ "Standard" \ UUID()) }
- }
-
- assertEquals(1, pool.idle)
- pool.close
- assertEquals(0, pool.idle)
- }
-
- @Test def testErrorCatchingAndLogging = {
- EmbeddedTestCassandra.init
-
- val hosts = Host("localhost", 9160, 250) :: Nil
- val params = new PoolParams(10, ExhaustionPolicy.Fail, 500L, 6, 2)
- val pool = new SessionPool(hosts, params, Consistency.One)
-
- pool.borrow { session =>
- try {
- session.count("Non Existant" \ "Nope" \ "Nice Try")
- } catch {
- case e:Throwable => /* ignore */
- }
-
- assertTrue(session.hasError)
- }
-
- assertEquals(0, pool.idle)
- }
+// @Test def testSessionPool = {
+// EmbeddedCassandraServer.init
+//
+// val hosts = Host(host, port, 250) :: Host(host, port+1, 250)
+// val params = new PoolParams(10, ExhaustionPolicy.Fail, 500L, 6, 2)
+// val pool = new SessionPool(hosts, params, Consistency.One)
+//
+// // as long as no exceptions were thrown we passed
+// (0 until 10).foreach { index =>
+// pool.borrow { _.count("Test" \ "Standard" \ UUID()) }
+// }
+//
+// assertEquals(1, pool.idle)
+// pool.close
+// assertEquals(0, pool.idle)
+// }
+//
+// @Test def testErrorCatchingAndLogging = {
+// EmbeddedCassandraServer.init
+//
+// val hosts = Host(host, port, 250) :: Nil
+// val params = new PoolParams(10, ExhaustionPolicy.Fail, 500L, 6, 2)
+// val pool = new SessionPool(hosts, params, Consistency.One)
+//
+// pool.borrow { session =>
+// try {
+// session.count("Non Existant" \ "Nope" \ "Nice Try")
+// } catch {
+// case e:Throwable => /* ignore */
+// }
+//
+// assertTrue(session.hasError)
+// }
+//
+// assertEquals(0, pool.idle)
+// }
}
diff --git a/src/test/scala/com/shorrockin/cascal/TestStandardInesrtAndList.scala b/src/test/scala/com/shorrockin/cascal/TestStandardInesrtAndList.scala
index 5044f95..2dbe93f 100644
--- a/src/test/scala/com/shorrockin/cascal/TestStandardInesrtAndList.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestStandardInesrtAndList.scala
@@ -1,16 +1,18 @@
package com.shorrockin.cascal
-import testing._
import model.Column
import org.junit.{Assert, Test}
import java.util.Date
import session.{ColumnPredicate, RangePredicate}
import utils.{Conversions, Logging}
-class TestStandardInesrtAndList extends CassandraTestPool with Logging {
+class TestStandardInesrtAndList extends EmbeddedCassandra with Logging {
import Assert._
import Conversions._
+ val family = "Test" \ "Standard"
+ val key = family \ "testStandardFunctions"
+
@Test def testStandardFunctions = borrow {
(session) =>
import session._
@@ -19,9 +21,6 @@ class TestStandardInesrtAndList extends CassandraTestPool with Logging {
log.debug("Version: " + version)
log.debug("Keyspaces: " + keyspaces.mkString("", ",", ""))
- val family = "Test" \ "Standard"
- val key = family \ "testStandardFunctions"
-
insert(key \ "Column-1" \ "Value-1")
insert(key \ "Column-2" \ "Value-2")
insert(key \ "Column-3" \ "Value-3")
@@ -75,5 +74,21 @@ class TestStandardInesrtAndList extends CassandraTestPool with Logging {
}
+
+ @Test def insertGetWithTTL = borrow {
+ (session) =>
+ import session._
+
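+      // "! 1" attaches a one-second TTL to the column; after sleeping two seconds
+      // the column has expired and get returns None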
+ insert(key \ "Column-1" \ "Value-1" ! 1)
+ val column = get(key \ "Column-1")
+ assertEquals("Value-1", string(column.get.value))
+ assertEquals("Column-1", string(column.get.name))
+ assertEquals(Some(1), column.get.ttl)
+
+      Thread.sleep(2000)
+
+ val expiredCol = get(key \ "Column-1")
+ assertTrue(expiredCol.isEmpty)
+ }
}
diff --git a/src/test/scala/com/shorrockin/cascal/TestSuperInsertAndList.scala b/src/test/scala/com/shorrockin/cascal/TestSuperInsertAndList.scala
index 1f48049..6bfd3f9 100644
--- a/src/test/scala/com/shorrockin/cascal/TestSuperInsertAndList.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestSuperInsertAndList.scala
@@ -1,11 +1,11 @@
package com.shorrockin.cascal
-import testing._
import model.{Column, SuperColumn}
import org.junit.{Test, Assert}
import utils.{UUID, Conversions, Logging}
+import java.nio.ByteBuffer
-class TestSuperInsertAndList extends CassandraTestPool with Logging {
+class TestSuperInsertAndList extends EmbeddedCassandra with Logging {
import Assert._
import Conversions._
@@ -101,10 +101,10 @@ class TestSuperInsertAndList extends CassandraTestPool with Logging {
}
- def locate(l: Seq[(SuperColumn, Seq[Column[_]])], value: Array[Byte]): Seq[Column[_]] = {
+ def locate(l: Seq[(SuperColumn, Seq[Column[_]])], value: ByteBuffer): Seq[Column[_]] = {
l.foreach {
(tuple) =>
- if (java.util.Arrays.equals(tuple._1.value, value)) {
+ if (java.util.Arrays.equals(tuple._1.value.array, value.array)) {
return tuple._2
}
}
diff --git a/src/test/scala/com/shorrockin/cascal/TestUUIDByteConversion.scala b/src/test/scala/com/shorrockin/cascal/TestUUIDByteConversion.scala
index be73c18..3a5b0fb 100755
--- a/src/test/scala/com/shorrockin/cascal/TestUUIDByteConversion.scala
+++ b/src/test/scala/com/shorrockin/cascal/TestUUIDByteConversion.scala
@@ -2,7 +2,6 @@ package com.shorrockin.cascal
import serialization.annotations._
import serialization.Converter
-import testing._
import org.junit.{Assert, Test}
import utils.{UUID, Conversions}
import java.util.{UUID => JavaUUID, Date}
@@ -10,13 +9,13 @@ import java.util.{UUID => JavaUUID, Date}
/**
* tests the UUID ability to convert to and from bytes, strings, etc.
*/
-class TestUUIDByteConversion extends CassandraTestPool {
+class TestUUIDByteConversion extends EmbeddedCassandra {
import Assert._
import Conversions._
@Test def ensureUUIDConvertsToFromBytes = {
val original = UUID()
- assertEquals(original, UUID(Conversions.bytes(original)))
+ assertEquals(original, UUID(Conversions.byteBuffer(original)))
}
@Test def ensureUUIDConvertsToStringAndBack = {