@@ -2,7 +2,7 @@ package com.winhc.bigdata.spark.jobs
 
 import com.winhc.bigdata.spark.config.EsConfig
 import com.winhc.bigdata.spark.udf.{BaseFunc, CompanyStaffAggs}
-import com.winhc.bigdata.spark.utils.{LoggingUtils, MaxComputer2Phoenix, SparkUtils}
+import com.winhc.bigdata.spark.utils.{CompanyIncSummary, LoggingUtils, MaxComputer2Phoenix, SparkUtils}
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.SparkSession
 
@@ -261,6 +261,8 @@ object company_staff {
       , "create_time"
       , "update_time"
       , "deleted"), "winhc_eci_dev.inc_ads_company_staff", "COMPANY_STAFF", ds, "rowkey").syn()
+
+    CompanyIncSummary(spark, project, "company_staff", "new_cid", Seq("new_cid", "hid")).calc
   }
 
   def inc_bulk_save(startDs: String): Unit = {
@@ -308,7 +310,7 @@ object company_staff {
     val e = CompanyStaffUtil(spark, project)
     e.init()
 
-//    e.inc_bulk_save("20200603")
+    //    e.inc_bulk_save("20200603")
 
     if (args.length == 1) {
       val Array(ds) = args
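
Note: the patch above imports CompanyIncSummary from com.winhc.bigdata.spark.utils and invokes it as
CompanyIncSummary(spark, project, "company_staff", "new_cid", Seq("new_cid", "hid")).calc after the
Phoenix sync. Only that call-site shape is visible in the diff; below is a rough sketch of what such
a helper could look like. The field names, the ds/deleted columns, and the target summary table are
assumptions for illustration, not the repo's actual implementation.

package com.winhc.bigdata.spark.utils

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{col, count, row_number}

// Hypothetical sketch matching the call site in the diff above; the real
// CompanyIncSummary in this repo may differ in both fields and logic.
case class CompanyIncSummary(spark: SparkSession,
                             project: String,       // e.g. "winhc_eci_dev"
                             tableName: String,     // e.g. "company_staff"
                             cidField: String,      // e.g. "new_cid"
                             dupliCols: Seq[String] // e.g. Seq("new_cid", "hid")
                            ) {

  // Recompute a per-company row count over the incremental ADS table,
  // keeping only the newest version of each dupliCols combination.
  def calc: Unit = {
    val inc = spark.table(s"$project.inc_ads_$tableName")

    // Rank duplicate rows so the freshest partition (ds) wins.
    val w = Window.partitionBy(dupliCols.map(col): _*).orderBy(col("ds").desc)

    inc.withColumn("rn", row_number().over(w))
      .where(col("rn") === 1 && col("deleted") === 0)
      .groupBy(cidField)
      .agg(count("*").as(s"${tableName}_cnt"))
      .write
      .mode("overwrite")
      .saveAsTable(s"$project.ads_${tableName}_summary") // assumed target table
  }
}

The parameterless, paren-free calc mirrors the paren-free call in the diff.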