@@ -0,0 +1,49 @@
+package com.winhc.bigdata.spark.test
+
+import com.winhc.bigdata.spark.utils.{MaxComputer2Phoenix, SparkUtils}
+
+import scala.collection.mutable
+
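+/**
+ * Manual test: sync one partition of a MaxCompute (ODPS) table into a Phoenix 5.x
+ * table via the project-local MaxComputer2Phoenix helper. The table names, the
+ * ds partition value, and the row-key columns below are test fixtures.
+ */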
+object TestSpark2Phoenix2 {
+
+  def main(args: Array[String]): Unit = {
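+    // MaxCompute (ODPS) Spark settings: the project to run against and, presumably,
+    // a cap on the number of partitions read when running in local mode.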
+    val map = mutable.Map[String, String](
+      "spark.hadoop.odps.spark.local.partition.amt" -> "100",
+      "spark.hadoop.odps.project.name" -> "winhc_eci_dev"
+    )
+    val sparkSession = SparkUtils.InitEnv("scala spark on Phoenix5.x test", map)
+
+
|
|
|
|
+ val odpsTable = "winhc_eci_dev.inc_ads_company_bid_list"
|
|
|
|
+ val phoenixTable = "company_bid_list"
|
|
|
|
+ val ds = "20200621"
|
|
|
|
+ val sublistTableFieldName = sparkSession.table(odpsTable).columns.filter(s => {
|
|
|
|
+ !s.equals("ds") && !s.equals("new_cid") && !s.equals("rowkey") && !s.equals("cids") && !s.equals("new_cids") && !s.equals("cid")
|
|
|
|
+ }).seq
|
|
|
|
+
+    val colsList = sublistTableFieldName ++ Seq("new_cid")
+    println(colsList)
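+    // Presumably copies the listed columns for partition `ds` from the ODPS table
+    // into the Phoenix table, with new_cid and id forming the Phoenix row key.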
+    MaxComputer2Phoenix(
+      sparkSession,
+      colsList,
+      odpsTable,
+      phoenixTable,
+      ds,
+      Seq("new_cid", "id")
+    ).syn()
+
+    sparkSession.stop()
+  }
+}