
Spark Streaming into HBase with filter logic

I've been trying to understand how Spark Streaming and HBase connect, but without success. What I'm trying to do is: given a Spark stream, process that stream and store the results in an HBase table. This is what I have so far:

import org.apache.spark.SparkConf 
import org.apache.spark.streaming.{Seconds, StreamingContext} 
import org.apache.spark.streaming.StreamingContext._ 
import org.apache.spark.storage.StorageLevel 
import org.apache.hadoop.hbase.HBaseConfiguration 
import org.apache.hadoop.hbase.client.{HBaseAdmin,HTable,Put,Get} 
import org.apache.hadoop.hbase.util.Bytes 

def blah(row: Array[String]) { 
    val hConf = new HBaseConfiguration() 
    val hTable = new HTable(hConf, "table") 
    val thePut = new Put(Bytes.toBytes(row(0))) 
    thePut.add(Bytes.toBytes("cf"), Bytes.toBytes(row(0)), Bytes.toBytes(row(0))) 
    hTable.put(thePut) 
} 

val ssc = new StreamingContext(sc, Seconds(1)) 
val lines = ssc.socketTextStream("localhost", 9999, StorageLevel.MEMORY_AND_DISK_SER) 
val words = lines.map(_.split(",")) 
val store = words.foreachRDD(rdd => rdd.foreach(blah)) 
ssc.start() 

I'm currently running the above code in spark-shell. I'm not sure what I'm doing wrong.
I get the following error in the shell:

14/09/03 16:21:03 ERROR scheduler.JobScheduler: Error running job streaming job 1409786463000 ms.0
org.apache.spark.SparkException: Job aborted due to stage failure: Task not serializable: java.io.NotSerializableException: org.apache.spark.streaming.StreamingContext
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1033)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1017)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1015)
        at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1015)
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$submitMissingTasks(DAGScheduler.scala:770)
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$submitStage(DAGScheduler.scala:713)
        at org.apache.spark.scheduler.DAGScheduler.handleJobSubmitted(DAGScheduler.scala:697)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1176)
        at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
        at akka.actor.ActorCell.invoke(ActorCell.scala:456)
        at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
        at akka.dispatch.Mailbox.run(Mailbox.scala:219)
        at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
        at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
        at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
        at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
        at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)

I also double-checked the HBase table, just in case, and nothing new was written there.

I'm running nc -lk 9999 in another terminal to feed data into the spark-shell for testing.


Could you paste the full stack trace? You should be able to see which class is causing this error. – zsxwing 2014-09-04 02:41:49


None of the HBase classes are serializable; make sure you aren't inadvertently serializing them. I don't see anything obvious in the code. – David 2014-09-04 15:28:03
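
To illustrate what accidentally serializing one of them would look like, here is a hypothetical anti-pattern sketch (not from the original post): the HTable is created on the driver and then captured by the closure passed to rdd.foreach, so Spark tries to serialize it along with the task and fails:

// Hypothetical anti-pattern: hTable is created on the driver
val hConf = new HBaseConfiguration()
val hTable = new HTable(hConf, "table")   // HTable is not serializable
words.foreachRDD(rdd => rdd.foreach { row =>
    val thePut = new Put(Bytes.toBytes(row(0)))
    thePut.add(Bytes.toBytes("cf"), Bytes.toBytes(row(0)), Bytes.toBytes(row(0)))
    hTable.put(thePut)                    // the closure captures hTable -> NotSerializableException
})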

Answers


With the help of users on the Spark user group, I was able to figure out how to get this working. It looks like I needed to wrap my streaming, map, and foreach calls inside serializable objects:

import org.apache.spark.SparkConf 
import org.apache.spark.streaming.{Seconds, StreamingContext} 
import org.apache.spark.streaming.StreamingContext._ 
import org.apache.spark.storage.StorageLevel 
import org.apache.hadoop.hbase.HBaseConfiguration 
import org.apache.hadoop.hbase.client.{HBaseAdmin,HTable,Put,Get} 
import org.apache.hadoop.hbase.util.Bytes 

object Blaher { 
    def blah(row: Array[String]) { 
    val hConf = new HBaseConfiguration() 
    val hTable = new HTable(hConf, "table") 
    val thePut = new Put(Bytes.toBytes(row(0))) 
    thePut.add(Bytes.toBytes("cf"), Bytes.toBytes(row(0)), Bytes.toBytes(row(0))) 
    hTable.put(thePut) 
    } 
} 

object TheMain extends Serializable{ 
    def run() { 
    val ssc = new StreamingContext(sc, Seconds(1)) 
    val lines = ssc.socketTextStream("localhost", 9999, StorageLevel.MEMORY_AND_DISK_SER) 
    val words = lines.map(_.split(",")) 
    val store = words.foreachRDD(rdd => rdd.foreach(Blaher.blah)) 
    ssc.start() 
    } 
} 

TheMain.run() 

To clarify for others: the reason it was failing before is probably that the blah function was defined inside the main function. So when blah was used in rdd.foreach(), the closure of the blah function had to be serialized, and that closure included other objects from the main function. The system therefore inadvertently tried to serialize unnecessary things. Moving the blah function to a different object solves this by keeping the closure clean. – 2014-09-05 18:43:57


This looks like a typical anti-pattern. See the section "Design Patterns for using foreachRDD" at http://spark.apache.org/docs/latest/streaming-programming-guide.html for the correct pattern.
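
For reference, the pattern that section describes is to create the non-serializable connection on the executor, once per partition, rather than once per record or on the driver. A rough sketch of how that could look for the HBase case above, reusing the table and column-family names from the question (illustrative only, not a tested implementation):

words.foreachRDD { rdd =>
    rdd.foreachPartition { rows =>
    // The HTable is created inside the task, so nothing non-serializable
    // crosses the driver/executor boundary, and the connection is reused
    // for the whole partition instead of being opened per record.
    val hConf = new HBaseConfiguration()
    val hTable = new HTable(hConf, "table")
    rows.foreach { row =>
        val thePut = new Put(Bytes.toBytes(row(0)))
        thePut.add(Bytes.toBytes("cf"), Bytes.toBytes(row(0)), Bytes.toBytes(row(0)))
        hTable.put(thePut)
    }
    hTable.close()
    }
}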


Link-only answers are discouraged on StackOverflow because links can break, even when they contain the correct answer. It would help if you could edit your answer to include the relevant material from the link. – 2014-10-01 15:31:46