@@ -1,7 +1,7 @@
 package com.winhc.bigdata.spark.ng.utils

 import com.winhc.bigdata.spark.ng.jobs.{args_company_job, general_handler}
-import com.winhc.bigdata.spark.utils.{BaseUtil, ElasticSearchIndexUtils, LoggingUtils, SparkUtils}
+import com.winhc.bigdata.spark.utils.{ElasticSearchIndexUtils, LoggingUtils, SparkUtils}
 import org.apache.spark.sql.SparkSession

 import scala.annotation.meta.getter
@@ -33,19 +33,19 @@ case class ReimportSummary(s: SparkSession
       .explode_calc()
   }

-  def calc(): Unit = {
+  def calc(ds: String): Unit = {
     for (e <- tabs) {
       re_explode(e)
     }

-    // sql(s"ALTER TABLE winhc_ng.out_es_summary DROP IF EXISTS PARTITION(ds>'0')")
-    // sql(s"ALTER TABLE winhc_ng.out_es_summary_person DROP IF EXISTS PARTITION(ds>'0')")
+    dropAllPartitions("winhc_ng.out_es_summary")
+    dropAllPartitions("winhc_ng.out_es_summary_person")

-    create_index()
+    create_index(ds)
   }


-  def create_index(): Unit = {
+  def create_index(ds: String): Unit = {
     val template =
       s"""
          |{
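Note on `dropAllPartitions`: the helper itself is not part of this hunk. Judging from the two ALTER TABLE statements it replaces, it presumably issues the same partition-drop DDL. A minimal sketch, assuming a `sql` helper is in scope (as the removed comments suggest):

```scala
// Hypothetical sketch -- the real helper is defined elsewhere in this repo.
// Equivalent to the DDL previously left commented out above: drops every ds
// partition so the summary table can be rebuilt from scratch.
def dropAllPartitions(tableName: String): Unit = {
  sql(s"ALTER TABLE $tableName DROP IF EXISTS PARTITION(ds>'0')")
}
```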
@@ -69,16 +69,15 @@ case class ReimportSummary(s: SparkSession
          |}
          |""".stripMargin

-    val yesterday = BaseUtil.getYesterday()
-    es.put(s"out_es_summary_person_v$yesterday", template)
-    es.put(s"out_es_summary_v$yesterday", template)
+    es.put(s"out_es_summary_person_v$ds", template)
+    es.put(s"out_es_summary_v$ds", template)
   }

-
 }

 object ReimportSummary {
   def main(args: Array[String]): Unit = {
+    val Array(ds) = args
     val project = "winhc_ng"
     val config = mutable.Map(
       "spark.hadoop.odps.project.name" -> project,
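With this hunk the index version suffix comes from the caller-supplied `ds` rather than `BaseUtil.getYesterday()`, so a re-run for a given partition always targets the matching index pair. A small illustration with a hypothetical `ds` value:

```scala
val ds = "20210609" // hypothetical partition date, not taken from this diff
val personIndex  = s"out_es_summary_person_v$ds" // "out_es_summary_person_v20210609"
val summaryIndex = s"out_es_summary_v$ds"        // "out_es_summary_v20210609"
```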
@@ -86,8 +85,14 @@ object ReimportSummary {
       "spark.hadoop.odps.spark.local.partition.amt" -> "100"
     )
     val spark = SparkUtils.InitEnv(this.getClass.getSimpleName + ":reimport", config)
-    val tns = Seq("company_court_open_announcement")
-    ReimportSummary(spark, tns).calc()
+    val tns = Seq(
+      "company_court_open_announcement",
+      "company_court_announcement",
+      "company_send_announcement",
+      "company_court_register",
+      "company_land_mortgage"
+    )
+    ReimportSummary(spark, tns).calc(ds)
     spark.stop()
   }
 }
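One caveat: `val Array(ds) = args` throws a `MatchError` if the job is launched with anything other than exactly one argument. A more defensive sketch, should that matter here:

```scala
// Hypothetical alternative to `val Array(ds) = args`:
val ds = args.headOption.getOrElse(
  sys.error("usage: ReimportSummary <ds>, e.g. 20210609")
)
```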