
Spark Scala serialization error from RDD map

I have an RDD in the format RDD[((Long, Long), (Long, Long))] and I need to transform it into an RDD[((Long, Long), (Long, Long, Long, Long))], where the tuple in the second RDD is computed by a function from the first RDD's values.

I am trying to do this with a map function, but I think I am doing something wrong here. Please help me fix it.

Here is the complete code:

package com.ranker.correlation.listitem 
import org.apache.spark.SparkConf 
import org.apache.spark.SparkContext 
import org.apache.spark.SparkContext._ 
import org.apache.spark.rdd._ 
import scala.collection.Map 

class ListItemCorrelation(sc: SparkContext) extends Serializable { 

    def up_down(dirX: Long, dirY: Long): (Long, Long, Long, Long) = { 
    if (dirX.equals(1)) { 
     if (dirY.equals(1)) { 
     return (1, 0, 0, 0) 
     } else { 
     return (0, 1, 0, 0) 
     } 
    } else { 
     if (dirY.equals(1)) { 
     return (0, 0, 1, 0) 
     } else { 
     return (0, 0, 0, 1) 
     } 
    } 
    } 

    def run(votes: String): RDD[((Long, Long), (Long, Long, Long, Long))] = { 
    val userVotes = sc.textFile(votes) 
    val userVotesPairs = userVotes.map { t => 
     val p = t.split(",") 
     (p(0).toLong, (p(1).toLong, p(2).toLong)) 
    } 
    val jn = userVotesPairs.join(userVotesPairs).values.filter(t => t._1._1.<(t._2._1)) 
    val first = jn.map(t => ((t._1._1, t._2._1), (t._1._2, t._2._2))) 
    var second = first.map(t => ((t._1._1, t._2._1), up_down(t._1._2, t._2._2))) 
    //More functionality 
    return result 
    } 

} 
object ListItemCorrelation extends Serializable { 
    def main(args: Array[String]) { 
    val votes = args(0) 
    val conf = new SparkConf().setAppName("SparkJoins").setMaster("local") 
    val context = new SparkContext(conf) 
    val job = new ListItemCorrelation(context) 
    val results = job.run(votes) 
    val output = args(1) 
    results.saveAsTextFile(output) 
    context.stop() 
    } 
} 

When I try to run this, I get the following error:

Exception in thread "main" org.apache.spark.SparkException: Task not serializable
    at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:298)
    at org.apache.spark.util.ClosureCleaner$.org$apache$spark$util$ClosureCleaner$$clean(ClosureCleaner.scala:288)
    at org.apache.spark.util.ClosureCleaner$.clean(ClosureCleaner.scala:108)
    at org.apache.spark.SparkContext.clean(SparkContext.scala:2094)
    at org.apache.spark.rdd.RDD$$anonfun$map$1.apply(RDD.scala:370)
    at org.apache.spark.rdd.RDD$$anonfun$map$1.apply(RDD.scala:369)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:362)
    at org.apache.spark.rdd.RDD.map(RDD.scala:369)
    at com.ranker.correlation.listitem.ListItemCorrelation.run(ListItemCorrelation.scala:34)
    at com.ranker.correlation.listitem.ListItemCorrelation$.main(ListItemCorrelation.scala:47)
    at com.ranker.correlation.listitem.ListItemCorrelation.main(ListItemCorrelation.scala)
Caused by: java.io.NotSerializableException: org.apache.spark.SparkContext
Serialization stack:
    - object not serializable (class: org.apache.spark.SparkContext, value: org.apache.spark.SparkContext@...)
    - field (class: com.ranker.correlation.listitem.ListItemCorrelation, name: sc, type: class org.apache.spark.SparkContext)
    - object (class com.ranker.correlation.listitem.ListItemCorrelation, com.ranker.correlation.listitem.ListItemCorrelation@...)
    - field (class: com.ranker.correlation.listitem.ListItemCorrelation$$anonfun$4, name: $outer, type: class com.ranker.correlation.listitem.ListItemCorrelation)
    - object (class com.ranker.correlation.listitem.ListItemCorrelation$$anonfun$4, )
    at org.apache.spark.serializer.SerializationDebugger$.improveException(SerializationDebugger.scala:40)
    at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:46)
    at org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:100)
    at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:295)
    ... 12 more

The error occurs on the following line:

var second = first.map(t => ((t._1._1, t._2._1), up_down(t._1._2, t._2._2)))

I am fairly new to Scala; please help me find the right way to do this.


Push the 'SparkContext' down as a parameter of 'run'? In the current implementation it gets dragged along with the class, and since it is not serializable, that is what makes this fail. – zero323
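(A minimal sketch of what that suggestion could look like, assuming the rest of the pipeline stays the same and leaving out the "More functionality" part: SparkContext becomes a parameter of run rather than a constructor field, so the instance captured by the map closure no longer holds anything non-serializable.)

package com.ranker.correlation.listitem
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd._

class ListItemCorrelation extends Serializable {

    def up_down(dirX: Long, dirY: Long): (Long, Long, Long, Long) =
     if (dirX == 1) { if (dirY == 1) (1, 0, 0, 0) else (0, 1, 0, 0) }
     else { if (dirY == 1) (0, 0, 1, 0) else (0, 0, 0, 1) }

    // SparkContext is only a method parameter here, not a field, so serializing
    // the ListItemCorrelation instance captured by the closure no longer drags it in.
    def run(sc: SparkContext, votes: String): RDD[((Long, Long), (Long, Long, Long, Long))] = {
     val userVotesPairs = sc.textFile(votes).map { t =>
      val p = t.split(",")
      (p(0).toLong, (p(1).toLong, p(2).toLong))
     }
     val jn = userVotesPairs.join(userVotesPairs).values.filter(t => t._1._1 < t._2._1)
     jn.map(t => ((t._1._1, t._2._1), up_down(t._1._2, t._2._2)))
    }
}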


@geek: did you find a solution? – arun

Answer


Put the up_down method on the companion object. When anything on the class is accessed inside an RDD closure, the class (and everything in it, including the SparkContext) gets serialized, and calling an instance method counts as such an access here. Moving the method to a static object avoids the problem:

package com.ranker.correlation.listitem 
import org.apache.spark.SparkConf 
import org.apache.spark.SparkContext 
import org.apache.spark.SparkContext._ 
import org.apache.spark.rdd._ 
import scala.collection.Map 

object ListItemCorrelation {

    // up_down lives on the companion object, so the map closure references
    // this static object instead of capturing the ListItemCorrelation
    // instance (and the SparkContext it holds).
    // Note: == is used for the numeric comparison; Long.equals(Int) is
    // always false because of boxing.
    def up_down(dirX: Long, dirY: Long): (Long, Long, Long, Long) = {
     if (dirX == 1) {
      if (dirY == 1) {
      (1, 0, 0, 0)
      } else {
      (0, 1, 0, 0)
      }
     } else {
      if (dirY == 1) {
      (0, 0, 1, 0)
      } else {
      (0, 0, 0, 1)
      }
     }
    }

    def main(args: Array[String]) {
     val votes = args(0)
     val conf = new SparkConf().setAppName("SparkJoins").setMaster("local")
     val context = new SparkContext(conf)
     val job = new ListItemCorrelation(context)
     val results = job.run(votes)
     val output = args(1)
     results.saveAsTextFile(output)
     context.stop()
    }
}


class ListItemCorrelation(sc: SparkContext) extends Serializable { 

    def run(votes: String): RDD[((Long, Long), (Long, Long, Long, Long))] = { 
    val userVotes = sc.textFile(votes) 
    val userVotesPairs = userVotes.map { t => 
     val p = t.split(",") 
     (p(0).toLong, (p(1).toLong, p(2).toLong)) 
    } 
    val jn = userVotesPairs.join(userVotesPairs).values.filter(t => t._1._1.<(t._2._1)) 
    val first = jn.map(t => ((t._1._1, t._2._1), (t._1._2, t._2._2))) 
    var second = first.map(t => ((t._1._1, t._2._1), ListItemCorrelation.up_down(t._1._2, t._2._2))) 
    //More functionality 
    return result 
    } 

} 
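As a quick sanity check of the fixed pipeline (a hypothetical example, assuming a SparkContext named sc and that each vote line is a userId,itemId,direction triple as parsed in run), two in-memory votes can be pushed through the same join/filter/map steps:

// Hypothetical sanity check; pairs mirrors the (userId, (itemId, direction)) layout built in run().
val pairs = sc.parallelize(Seq((1L, (10L, 1L)), (1L, (20L, 0L))))
val jn = pairs.join(pairs).values.filter(t => t._1._1 < t._2._1)
val result = jn.map(t => ((t._1._1, t._2._1), ListItemCorrelation.up_down(t._1._2, t._2._2)))
result.collect()   // expected: Array(((10,20),(0,1,0,0)))

The map closure now only references the ListItemCorrelation companion object, so nothing non-serializable is pulled into the task.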