Apache Spark SQL (Scala API): Interacting with Multiple Data Sources

Preparing the Data

Contents of person.txt:

1 zhangsan 20
2 lisi 29
3 wangwu 25
4 zhaoliu 30
5 tianqi 35
6 kobe 40
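
Each line is space-separated with three fields: id, name, and age. The write example below maps these onto a Person case class and lets Spark SQL derive the schema by reflection. As an alternative sketch, the same schema could be declared explicitly and the file parsed through the CSV reader with a space separator (the path follows the example; spark is the SparkSession created below):

import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

// Alternative to the case-class approach: declare the schema by hand
val personSchema = StructType(Seq(
  StructField("id", IntegerType, nullable = false),
  StructField("name", StringType, nullable = false),
  StructField("age", IntegerType, nullable = false)
))

// The CSV reader accepts a custom separator, so it can parse this space-separated file
val personDF = spark.read
  .option("sep", " ")
  .schema(personSchema)
  .csv("E:\\cache\\sparkCache\\20200409\\person.txt")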

Writing Data

package demo12

import java.util.Properties

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Test08 {
  case class Person(id: Int, name: String, age: Int)

  def main(args: Array[String]): Unit = {
    // 1. Create the SparkSession
    val spark: SparkSession = SparkSession.builder().master("local[*]").appName("SparkSQL").getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // 2. Read the file
    val fileRDD: RDD[String] = sc.textFile("E:\\cache\\sparkCache\\20200409\\person.txt")
    val linesRDD: RDD[Array[String]] = fileRDD.map(_.split(" "))
    val rowRDD: RDD[Person] = linesRDD.map(line => Person(line(0).toInt, line(1), line(2).toInt))

    // 3. Convert the RDD to a DataFrame
    // Note: RDD has no toDF method of its own; it is added through an implicit conversion
    import spark.implicits._

    // Note: rowRDD's element type is Person, which already carries the schema information,
    // so Spark SQL can obtain it via reflection and attach it to the DataFrame
    val personDF: DataFrame = rowRDD.toDF()

    // ================== Write the DataFrame to different data sources ==================
    // Writing as plain text fails, because the text source supports only a single column:
    // "Text data source supports only a single column, and you have 3 columns."
    //personDF.write.text("D:\\data\\output\\text")

    // Each call below creates a directory of part files in the given format
    personDF.write.json("E:\\cache\\sparkCache\\20200409\\output001\\json")
    personDF.write.csv("E:\\cache\\sparkCache\\20200409\\output001\\csv")
    personDF.write.parquet("E:\\cache\\sparkCache\\20200409\\output001\\parquet")

    // JDBC connection properties for MySQL
    val prop = new Properties()
    prop.setProperty("user", "root")
    prop.setProperty("password", "root")

    // The write succeeds even if the table does not exist yet, because personDF carries the schema
    personDF.write.mode(SaveMode.Overwrite).jdbc(
      "jdbc:mysql://localhost:3306/bigdata0409?characterEncoding=UTF-8", "person", prop)

    println("写入成功")

    sc.stop()
    spark.stop()
  }
}
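
The json/csv/parquet shortcuts above are convenience wrappers around the generic DataFrameWriter API. The same write can be expressed with format/option/save, which also allows source-specific options such as a CSV header. A minimal sketch (the csv_with_header path is illustrative, not from the original example):

// Generic equivalent of personDF.write.csv(...), with an extra header option
personDF.write
  .format("csv")
  .option("header", "true")   // write the column names as the first line
  .mode(SaveMode.Overwrite)   // replace any previous output
  .save("E:\\cache\\sparkCache\\20200409\\output001\\csv_with_header")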

Reading Data

package demo12

import java.util.Properties

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

object Test09 {

  def main(args: Array[String]): Unit = {
    // 1. Create the SparkSession
    val spark: SparkSession = SparkSession.builder().master("local[*]").appName("SparkSQL").getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // 2. Read back from each data source
    spark.read.json("E:\\cache\\sparkCache\\20200409\\output001\\json").show()
    // CSV has no header, so rename the default _c0/_c1/_c2 columns
    spark.read.csv("E:\\cache\\sparkCache\\20200409\\output001\\csv").toDF("id", "name", "age").show()
    spark.read.parquet("E:\\cache\\sparkCache\\20200409\\output001\\parquet").show()

    // Read back from MySQL via JDBC
    val prop = new Properties()
    prop.setProperty("user", "root")
    prop.setProperty("password", "root")
    spark.read.jdbc(
      "jdbc:mysql://localhost:3306/bigdata0409?characterEncoding=UTF-8", "person", prop).show()

    sc.stop()
    spark.stop()
  }
}
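
As on the write side, the read shortcuts have a generic format/option/load equivalent. If the CSV was written with a header (as in the sketch after the write example), the header and inferSchema options recover the column names and types without the manual toDF renaming:

// Generic equivalent of spark.read.csv(...); assumes the hypothetical csv_with_header output
val csvDF = spark.read
  .format("csv")
  .option("header", "true")       // use the first line as column names
  .option("inferSchema", "true")  // sample the data to infer column types
  .load("E:\\cache\\sparkCache\\20200409\\output001\\csv_with_header")
csvDF.printSchema()
csvDF.show()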