Merge remote-tracking branch 'origin/master'

xufei, 3 years ago
commit 800cd5dd5d

+ 3 - 0
src/main/scala/com/winhc/bigdata/spark/jobs/chance/CompanyChangeHandle.scala

@@ -25,6 +25,9 @@ trait CompanyChangeHandle extends Serializable with Logging {
   * @return rowkey, cid, type [insert or update], new data, updated fields, update title, change label [1. general change, 2. risk change ...], business time
    */
   def handle(rowkey: String, oldMap: Map[String, String], newMap: Map[String, String]): (String, String, String, Map[String, String], String, String, String, String, Map[String, String]) = {
+    if (getBizTime(newMap) == null) {
+      return null
+    }
     if (oldMap == null) {
       (rowkey, getCid(rowkey, newMap), "insert", newMap, null, getInsertTitle(newMap), getLabel(oldMap, newMap), getBizTime(newMap), null)
     } else {

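Note that handle can now return null whenever the business time is missing, so callers must drop those records before writing. A minimal caller-side sketch (changeRdd and handler are hypothetical names, not part of this commit):

    // Hypothetical caller: discard change records whose business time is missing.
    val rows = changeRdd
      .map(r => handler.handle(r.rowkey, r.oldMap, r.newMap))
      .filter(_ != null) // handle returns null when getBizTime(newMap) is null
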
+ 7 - 3
src/main/scala/com/winhc/bigdata/spark/ng/jobs/CompanyIndexJob.scala

@@ -79,9 +79,11 @@ case class CompanyIndexJob(s: SparkSession,
     , "new_cid"
     , "company_tm"
     , "app_info"
-    , "company_score_weight")
+    , "company_score_weight"
+    , "deleted"
+  )
 
-  private val org_prefix = "ods"
+  private val org_prefix = "ads"
 
   def run(): Unit = {
     if (!spark.catalog.tableExists(target_tab) || getLastPartitionsOrElse(target_tab, null) == null)
@@ -256,7 +258,9 @@ case class CompanyIndexJob(s: SparkSession,
          |INSERT OVERWRITE TABLE $target_tab_simp PARTITION(ds='$target_ds')
          |SELECT ${getColumns(target_tab_simp).diff(Seq("ds")).mkString(",")}
          |FROM
-         |    out_company_index
+         |    $target_tab
+         |WHERE
+         |    ds='$target_ds'
          |""".stripMargin)
 
   }

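The simp table is now rebuilt from the just-written partition of $target_tab rather than the out_company_index temp view, so it reflects exactly what was persisted (including the new deleted column). Rendered with hypothetical values (target_tab = winhc_ng.ads_company_index, target_ds = 20201221; the real names come from the job's config), the statement comes out roughly as:

    // Hedged render of the rewritten INSERT; names above are assumptions.
    sql(
      s"""
         |INSERT OVERWRITE TABLE winhc_ng.ads_company_index_simp PARTITION(ds='20201221')
         |SELECT company_id -- ...remaining simp columns minus ds
         |FROM winhc_ng.ads_company_index
         |WHERE ds='20201221'
         |""".stripMargin)
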
+ 368 - 0
src/main/scala/com/winhc/bigdata/spark/ng/jobs/CompanySummaryNg.scala

@@ -0,0 +1,368 @@
+package com.winhc.bigdata.spark.ng.jobs
+
+import com.winhc.bigdata.spark.utils.BaseUtil.isWindows
+import com.winhc.bigdata.spark.utils.{BaseUtil, LoggingUtils, SparkUtils}
+import org.apache.commons.lang3.StringUtils
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.spark.sql.SparkSession
+
+import scala.annotation.meta.getter
+import scala.collection.mutable
+
+/**
+ * @Author: XuJiakai
+ * @Date: 2020/10/10 10:23
+ * @Description: enhanced company summary generator for the winhc_ng project
+ */
+case class GroupByInfoNg(field: String, value_alias: Seq[(String, String)])
+
+case class CompanySummaryNg(s: SparkSession,
+                            project: String, // project the table lives in
+                            tableName: String, // table name (without prefix)
+                            companyIdField: String, // company id field name, e.g. split(rowkey,'_')
+                            distinctField: String = "rowkey", // dedup primary key
+                            groupByInfo: GroupByInfoNg = null, // extra group-by condition
+                            where: String = "", // where condition, e.g. deleted = 0
+                            sortField: String = "ds"
+                            ) extends LoggingUtils {
+  @(transient@getter) val spark: SparkSession = s
+
+  private val f_bytes: Array[Byte] = Bytes.toBytes("F")
+  private val name_bytes: Array[Byte] = Bytes.toBytes(tableName.toUpperCase)
+  val ads_table = s"${project}.ads_$tableName" // full (historical) ads table
+  val inc_ads_table = s"${project}.inc_ads_$tableName"
+
+  val new_cols = getColumns(ads_table).intersect(getColumns(inc_ads_table))
+
+  private def create_or_replace_summary(target_tab: String): Unit = {
+    val ddl = groupByInfo == null match {
+      case true => s"$tableName BIGINT"
+      case false => groupByInfo.value_alias.map(r => {
+        s"${r._2} BIGINT"
+      }).mkString(",")
+    }
+    val d =
+      s"""
+         |CREATE TABLE IF NOT EXISTS $target_tab
+         |(
+         |    company_id string
+         |    ,${ddl}
+         |)
+         |COMMENT 'summary tmp,create by ${BaseUtil.nowDate(pattern = "yyyy-MM-dd HH:mm:ss")}'
+         |""".stripMargin
+
+    if (spark.catalog.tableExists(target_tab)) {
+      sql(
+        s"""
+           |DROP TABLE IF EXISTS $target_tab
+           |""".stripMargin)
+    }
+
+    sql(d)
+  }
+
+  def calc(is_inc: Boolean = true, target_tab: String = ""): Unit = {
+    val ads_last_ds = getLastPartitionsOrElse(ads_table, "0")
+    val inc_ads_last_ds = getLastPartitionsOrElse(inc_ads_table, "0")
+    val wh = StringUtils.isEmpty(where) match {
+      case true => s""
+      case false => s"AND   $where"
+    }
+    val tmp_tab = "inc_tmp_view"
+
+    is_inc match {
+      case true => {
+        sql(
+          s"""
+             |SELECT  ${new_cols.map(getCastCols(_, "org_tab.")).mkString(",")}
+             |FROM    (
+             |            SELECT  DISTINCT $companyIdField as xjk_cid
+             |            FROM    $inc_ads_table
+             |            WHERE   ds = $inc_ads_last_ds
+             |        ) id_table
+             |JOIN (
+             |              SELECT  ${new_cols.map(getCastCols(_, "")).mkString(",")}
+             |                      ,$companyIdField as xjk_cid
+             |              FROM    $inc_ads_table
+             |              WHERE   ds > '$ads_last_ds'
+             |              AND     ds < '$inc_ads_last_ds'
+             |              UNION ALL
+             |              SELECT  ${new_cols.map(getCastCols(_, "")).mkString(",")}
+             |                      ,$companyIdField as xjk_cid
+             |              FROM    $ads_table
+             |              WHERE   ds = '$ads_last_ds'
+             |          ) org_tab
+             |ON      id_table.xjk_cid = org_tab.xjk_cid
+             |UNION ALL
+             |SELECT  ${new_cols.map(getCastCols(_, "")).mkString(",")}
+             |FROM    $inc_ads_table
+             |WHERE   ds = $inc_ads_last_ds
+             |""".stripMargin)
+          .createOrReplaceTempView(tmp_tab)
+      }
+      case false => {
+        sql(
+          s"""
+             |SELECT  ${new_cols.map(getCastCols(_, "")).mkString(",")}
+             |FROM    $ads_table
+             |WHERE   ds = '$ads_last_ds'
+             |UNION ALL
+             |SELECT  ${new_cols.map(getCastCols(_, "")).mkString(",")}
+             |FROM    $inc_ads_table
+             |WHERE   ds > $ads_last_ds
+             |""".stripMargin)
+          .createOrReplaceTempView(tmp_tab)
+
+      }
+    }
+
+    val distinct_tab = s"${tmp_tab}_distinct"
+    sql(
+      s"""
+         |SELECT  ${new_cols.map(getCastCols(_, "")).mkString(",")}
+         |FROM    (
+         |            SELECT  tmp.*
+         |                    ,ROW_NUMBER() OVER(PARTITION BY $distinctField ORDER BY $sortField DESC ) c
+         |            FROM    $tmp_tab AS tmp
+         |        ) tmp2
+         |WHERE   tmp2.c = 1
+         |$wh
+         |""".stripMargin)
+      .createOrReplaceTempView(distinct_tab)
+
+
+    val view = groupByInfo == null match {
+      case true => s"arr[0] as $tableName"
+      case false => groupByInfo.value_alias.indices.map(i => {
+        s"arr[$i] as ${groupByInfo.value_alias(i)._2}"
+      }).mkString(",")
+    }
+
+    // register the UDF
+    if (groupByInfo != null) {
+      val fieldSeq = groupByInfo.value_alias.map(r => {
+        (s"${r._1}", r._2)
+      })
+
+      def getResArr(group_val: String, num: Long): Seq[Long] = {
+        val res = scala.collection.mutable.ArrayBuffer[Long]()
+        for (i <- fieldSeq) {
+          if (i._1.equals(group_val)) {
+            res += num
+          } else {
+            res += 0
+          }
+        }
+        res.toSeq
+      }
+
+      spark.udf.register("xjk_func", getResArr _)
+    }
+
+    val groupKey_show = groupByInfo == null match {
+      case true => s",array(count(1)) as arr"
+      case false => s",xjk_func(cast(${groupByInfo.field} as STRING),count(1)) as arr"
+    }
+
+    val groupKey = groupByInfo == null match {
+      case true => s""
+      case false => s",${groupByInfo.field}"
+    }
+
+    sql(
+      s"""
+         |SELECT  company_id
+         |        ,${view}
+         |FROM    (
+         |        SELECT  $companyIdField as company_id
+         |                $groupKey_show
+         |        FROM    $distinct_tab
+         |        GROUP BY $companyIdField ${groupKey}
+         |)
+         |""".stripMargin)
+      .createOrReplaceTempView("summary_tab")
+
+    if (groupByInfo != null) {
+      sql(
+        s"""
+           |SELECT  company_id
+           |        ,${groupByInfo.value_alias.map(_._2).map(f => s"sum($f) as $f").mkString(",")}
+           |FROM    summary_tab
+           |GROUP BY company_id
+           |""".stripMargin)
+        .createOrReplaceTempView("summary_tab")
+    }
+
+
+    if (StringUtils.isEmpty(target_tab)) {
+      import com.winhc.bigdata.spark.implicits.DataFrame2HBaseHelper._
+      val writF = getColumns("summary_tab").diff(Seq("company_id"))
+      sql(
+        s"""
+           |SELECT *
+           |FROM
+           |    summary_tab
+           |""".stripMargin)
+        .save2HBase("NG_COMPANY_SUMMARY", "company_id", writF)
+      /*create_or_replace_summary(target_tab)
+      sql(
+        s"""
+           |INSERT ${if (isWindows) "INTO" else "OVERWRITE"} TABLE $target_tab
+           |SELECT *
+           |FROM
+           |    summary_tab
+           |""".stripMargin)*/
+    } else {
+      create_or_replace_summary(target_tab)
+      sql(
+        s"""
+           |INSERT ${if (isWindows) "INTO" else "OVERWRITE"} TABLE $target_tab
+           |SELECT *
+           |FROM
+           |    summary_tab
+           |""".stripMargin)
+    }
+  }
+
+
+  private def getCastCols(name: String, pre: String): String = {
+    val list = List("cid", "new_cid", "ncid")
+    if (list.contains(name)) {
+      return s"CAST(${pre}${name} as BIGINT) $name"
+    }
+    pre + name
+  }
+
+}
+
+object CompanySummaryNg {
+
+
+  def run(spark: SparkSession, tab: String, target_tab: String = null): Unit = {
+    var csp: CompanySummaryNg = null
+    val project = "winhc_ng"
+
+    tab match {
+      case "company_zxr_final_case" => {
+        // concluded enforcement cases
+        csp = CompanySummaryNg(s = spark
+          , project = project
+          , tableName = "company_zxr_final_case"
+          , companyIdField = "split(rowkey,'_')[0]"
+          , where = "deleted = 0"
+          , groupByInfo = GroupByInfoNg(field = "status", value_alias = Seq(
+            ("0", "company_zxr_final_case_0")
+            , ("1", "company_zxr_final_case_1")
+          ))
+        )
+      }
+      case "company_dishonest_info" => {
+        // dishonest debtors
+        csp = CompanySummaryNg(s = spark
+          , project = project
+          , tableName = "company_dishonest_info"
+          , companyIdField = "split(rowkey,'_')[0]"
+          , where = "deleted = 0"
+          , groupByInfo = GroupByInfoNg(field = "status", value_alias = Seq(
+            ("0", "company_dishonest_info_0")
+            , ("1", "company_dishonest_info_1")
+          ))
+        )
+      }
+      case "company_zxr_list" => {
+        // persons subject to enforcement
+        csp = CompanySummaryNg(s = spark
+          , project = project
+          , tableName = "company_zxr_list"
+          , companyIdField = "split(rowkey,'_')[0]"
+          , where = "deleted = 0"
+          , groupByInfo = GroupByInfoNg(field = "status", value_alias = Seq(
+            ("0", "company_zxr_list_0")
+            , ("1", "company_zxr_list_1")
+          ))
+        )
+      }
+      case "company_zxr_restrict" => {
+        // consumption-restriction orders
+        csp = CompanySummaryNg(s = spark
+          , project = project
+          , tableName = "company_zxr_restrict"
+          , companyIdField = "split(rowkey,'_')[0]"
+          , where = "deleted = 0"
+          , groupByInfo = GroupByInfoNg(field = "status", value_alias = Seq(
+            ("0", "company_zxr_restrict_0")
+            , ("1", "company_zxr_restrict_1")
+          ))
+        )
+      }
+      case "company_land_mortgage" => {
+        // land mortgages
+        csp = CompanySummaryNg(s = spark
+          , project = project
+          , tableName = "company_land_mortgage"
+          , companyIdField = "split(rowkey,'_')[0]"
+          //          , where = "deleted = 0"
+          , groupByInfo = GroupByInfoNg(field = "type", value_alias = Seq(
+            ("mortgagor", "company_land_mortgage_mortgagor")
+            , ("mortgagee", "company_land_mortgage_mortgagee")
+            , ("bothsame", "company_land_mortgage_bothsame")
+            , ("bothone", "company_land_mortgage_bothone")
+            , ("bothtwo", "company_land_mortgage_bothtwo")
+          ))
+        )
+      }
+
+      case "company_land_transfer" => {
+        // land transfers
+        csp = CompanySummaryNg(s = spark
+          , project = project
+          , tableName = "company_land_transfer"
+          , companyIdField = "split(rowkey,'_')[0]"
+          //          , where = "deleted = 0"
+          , groupByInfo = GroupByInfoNg(field = "type", value_alias = Seq(
+            ("pre", "company_land_transfer_pre")
+            , ("now", "company_land_transfer_now")
+            , ("bothsame", "company_land_transfer_bothsame")
+            , ("bothone", "company_land_transfer_bothone")
+            , ("bothtwo", "company_land_transfer_bothtwo")
+          ))
+        )
+      }
+      case _ =>
+        csp = CompanySummaryNg(s = spark
+          , project = project
+          , tableName = tab
+          , companyIdField = "split(rowkey,'_')[0]"
+          , where = "instr(rowkey,'_') != 0 and deleted = 0"
+        )
+    }
+
+    if (target_tab == null)
+      csp.calc()
+    else
+      csp.calc(is_inc = false, target_tab = target_tab)
+  }
+
+
+  def main(args: Array[String]): Unit = {
+
+    val Array(tab) = args
+
+    println(
+      s"""
+         |tab: $tab
+         |""".stripMargin)
+
+    val config = mutable.Map(
+      "spark.hadoop.odps.project.name" -> "winhc_ng",
+      "spark.debug.maxToStringFields" -> "200",
+      "spark.hadoop.odps.spark.local.partition.amt" -> "10000"
+    )
+
+    val spark = SparkUtils.InitEnv(getClass.getSimpleName, config)
+
+//    run(spark, tab,"winhc_ng.tmp_xjk_summary")
+
+    spark.stop()
+  }
+}

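The grouped counters hinge on xjk_func: each group value is one-hot expanded into a fixed-width array, and the arrays are then summed per company_id into one BIGINT column per alias. A standalone sketch of that logic, reusing the company_zxr_final_case mapping from run() (not itself part of the commit):

    // (value, alias) pairs, as in GroupByInfoNg for company_zxr_final_case.
    val fieldSeq = Seq(("0", "company_zxr_final_case_0"), ("1", "company_zxr_final_case_1"))

    // One-hot expansion: the count lands in the slot whose value matches.
    def getResArr(groupVal: String, num: Long): Seq[Long] =
      fieldSeq.map { case (v, _) => if (v == groupVal) num else 0L }

    println(getResArr("1", 42L)) // List(0, 42)
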
+ 164 - 0
src/main/scala/com/winhc/bigdata/spark/ng/jobs/general_handler.scala

@@ -0,0 +1,164 @@
+package com.winhc.bigdata.spark.ng.jobs
+
+import com.winhc.bigdata.spark.config.EsConfig
+import com.winhc.bigdata.spark.udf.BaseFunc
+import com.winhc.bigdata.spark.utils.BaseUtil.isWindows
+import com.winhc.bigdata.spark.utils.{LoggingUtils, SparkUtils}
+import org.apache.spark.sql.SparkSession
+
+import scala.annotation.meta.getter
+import scala.collection.mutable
+
+/**
+ * @author: XuJiakai
+ * @date: 2020/12/21 10:13
+ *        general processing job for company data under the ng project
+ */
+case class general_handler(s: SparkSession,
+                           project: String, // project the table lives in
+                           md5_fields: Seq[String],
+                           tn: String
+                          ) extends LoggingUtils with BaseFunc {
+  @(transient@getter) val spark: SparkSession = s
+
+  val ods_tab = s"$project.ods_$tn"
+  val inc_ods_tab = s"$project.inc_ods_$tn"
+
+  val ads_tab = s"$project.ads_$tn"
+  val inc_ads_tab = s"$project.inc_ads_$tn"
+
+  verify()
+  reg_udf()
+
+  private def verify(): Unit = {
+    val catalog = spark.catalog
+    if (!(catalog.tableExists(ods_tab) && catalog.tableExists(inc_ods_tab) && catalog.tableExists(inc_ads_tab) && catalog.tableExists(ads_tab))) {
+      println("table is not exists !!!")
+      sys.exit(-1)
+    }
+  }
+
+  private def reg_udf(): Unit = {
+    cleanup()
+  }
+
+  val rowkey_f = md5_fields.isEmpty match {
+    case true => s"company_id"
+    case false => s"concat_ws('_',company_id,md5(cleanup(concat_ws('',${md5_fields.mkString(",")}))))"
+  }
+
+  def all(): Unit = {
+    val inc_ods_ds = getLastPartitionsOrElse(inc_ods_tab, getLastPartitionsOrElse(ods_tab, null))
+
+    if (inc_ods_ds == null) {
+      println("ds is null !!!")
+      return
+    }
+    val inter_cols = getColumns(ods_tab).intersect(getColumns(inc_ods_tab)).diff(Seq("rowkey"))
+
+    sql(
+      s"""
+         |INSERT ${if (isWindows) "INTO" else "OVERWRITE"} TABLE $ads_tab PARTITION(ds='$inc_ods_ds')
+         |SELECT  ${getColumns(ads_tab).diff(Seq("ds")).mkString(",")}
+         |FROM    (
+         |            SELECT  *
+         |                    ,ROW_NUMBER() OVER(PARTITION BY rowkey ORDER BY ds DESC) AS num
+         |            FROM    (
+         |                        SELECT  $rowkey_f as rowkey
+         |                                ,${inter_cols.mkString(",")}
+         |                        FROM    $ods_tab
+         |                        WHERE   ds > 0
+         |                        UNION ALL
+         |                        SELECT  $rowkey_f as rowkey
+         |                                ,${inter_cols.mkString(",")}
+         |                        FROM    $inc_ods_tab
+         |                        WHERE   ds > 0
+         |                    ) AS t1
+         |        ) AS t2
+         |WHERE   t2.num = 1
+         |""".stripMargin)
+  }
+
+  def inc(): Unit = {
+    val inc_ods_ds = getLastPartitionsOrElse(inc_ods_tab, getLastPartitionsOrElse(ods_tab, null))
+    var ads_ds = getLastPartitionsOrElse(ads_tab, null)
+    if (ads_ds.equals(inc_ods_ds)) {
+      ads_ds = getSecondLastPartitionOrElse(ads_tab, null)
+      if (ads_ds == null) {
+        all()
+        return
+      }
+    }
+
+    val inter_cols = getColumns(ods_tab).intersect(getColumns(inc_ods_tab)).diff(Seq("rowkey"))
+
+
+    sql(
+      s"""
+         |INSERT ${if (isWindows) "INTO" else "OVERWRITE"} TABLE $inc_ads_tab PARTITION(ds='$inc_ods_ds')
+         |SELECT  ${getColumns(inc_ads_tab).diff(Seq("ds")).mkString(",")}
+         |FROM    (
+         |            SELECT  *
+         |                    ,ROW_NUMBER() OVER(PARTITION BY rowkey ORDER BY ds DESC) AS num
+         |            FROM    (
+         |                        SELECT  $rowkey_f as rowkey
+         |                                ,${inter_cols.mkString(",")}
+         |                        FROM    $inc_ods_tab
+         |                        WHERE   ds > $ads_ds
+         |                    ) AS t1
+         |        ) AS t2
+         |WHERE   t2.num = 1
+         |""".stripMargin)
+  }
+
+
+  def calc(): Unit = {
+    val ads_ds = getLastPartitionsOrElse(ads_tab, null)
+    if (ads_ds == null)
+      all()
+    else
+      inc()
+  }
+}
+
+object general_handler {
+
+  val tab_md5_fields_map = Map(
+    "company_app_info" -> Seq("name")
+    , "company_staff" -> Seq("staff_name")
+    , "company_holder" -> Seq("holder_name")
+    , "company_icp" -> Seq("liscense", "domain")
+    , "company_tm" -> Seq("reg_no")
+  )
+
+
+  def run(s: SparkSession, project: String, tn: String, md5_fields: Seq[String] = null): Unit = {
+    var f = tab_md5_fields_map.getOrElse(tn, null)
+    if (md5_fields != null) {
+      f = md5_fields
+    }
+    if (f == null) {
+      println("md5 fields is empty !!!")
+      sys.exit(-2)
+    }
+    tn match {
+      case _ => {
+        general_handler(s = s, project = project, tn = tn, md5_fields = f).calc()
+      }
+    }
+  }
+
+  def main(args: Array[String]): Unit = {
+    val Array(tn) = args
+    val project = "winhc_ng"
+    val config = EsConfig.getEsConfigMap ++ mutable.Map(
+      "spark.hadoop.odps.project.name" -> project,
+      "spark.debug.maxToStringFields" -> "200",
+      "spark.hadoop.odps.spark.local.partition.amt" -> "100"
+    )
+    val spark = SparkUtils.InitEnv(this.getClass.getSimpleName + ":" + tn, config)
+    general_handler.run(spark, project, tn, null)
+    spark.stop()
+
+  }
+}

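Row identity in general_handler comes from rowkey_f: with no md5 fields the rowkey is plain company_id, otherwise company_id joined to an md5 over the concatenated fields (run through the cleanup UDF). A quick sketch of the expression it builds, using the company_icp entry from tab_md5_fields_map:

    // Sketch only; cleanup() is the UDF registered via BaseFunc.
    val md5Fields = Seq("liscense", "domain") // field names exactly as listed above
    val rowkeyF =
      if (md5Fields.isEmpty) "company_id"
      else s"concat_ws('_',company_id,md5(cleanup(concat_ws('',${md5Fields.mkString(",")}))))"
    println(rowkeyF)
    // concat_ws('_',company_id,md5(cleanup(concat_ws('',liscense,domain))))
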
+ 107 - 0
src/main/scala/com/winhc/bigdata/spark/ng/jobs/inc_company_ng.scala

@@ -0,0 +1,107 @@
+package com.winhc.bigdata.spark.ng.jobs
+
+import com.winhc.bigdata.spark.config.EsConfig
+import com.winhc.bigdata.spark.udf.BaseFunc
+import com.winhc.bigdata.spark.utils.BaseUtil.isWindows
+import com.winhc.bigdata.spark.utils.{LoggingUtils, SparkUtils}
+import org.apache.spark.sql.SparkSession
+
+import scala.annotation.meta.getter
+import scala.collection.mutable
+
+/**
+ * @author: XuJiakai
+ * @date: 2020/12/21 09:36
+ */
+case class inc_company_ng(s: SparkSession,
+                          project: String // project the table lives in
+                         ) extends LoggingUtils with BaseFunc {
+  @(transient@getter) val spark: SparkSession = s
+
+  val ods_tab = "winhc_ng.ods_company"
+  val inc_ods_tab = "winhc_ng.inc_ods_company"
+
+  val ads_tab = "winhc_ng.ads_company"
+  val inc_ads_tab = "winhc_ng.inc_ads_company"
+
+  def all(): Unit = {
+    val inc_ods_ds = getLastPartitionsOrElse(inc_ods_tab, getLastPartitionsOrElse(ods_tab, null))
+
+    if (inc_ods_ds == null) {
+      println("ds is null !!!")
+      return
+    }
+
+    sql(
+      s"""
+         |INSERT ${if (isWindows) "INTO" else "OVERWRITE"} TABLE $ads_tab PARTITION(ds='$inc_ods_ds')
+         |SELECT  ${getColumns(ads_tab).diff(Seq("ds")).mkString(",")}
+         |FROM    (
+         |            SELECT  *
+         |                    ,ROW_NUMBER() OVER(PARTITION BY company_id ORDER BY ds DESC) AS num
+         |            FROM    (
+         |                        SELECT  *
+         |                        FROM    $ods_tab
+         |                        WHERE   ds > 0
+         |                        UNION ALL
+         |                        SELECT  *
+         |                        FROM    $inc_ods_tab
+         |                        WHERE   ds > 0
+         |                    ) AS t1
+         |        ) AS t2
+         |WHERE   t2.num = 1
+         |""".stripMargin)
+  }
+
+  def inc(): Unit = {
+    val inc_ods_ds = getLastPartitionsOrElse(inc_ods_tab, getLastPartitionsOrElse(ods_tab, null))
+    var ads_ds = getLastPartitionsOrElse(ads_tab, null)
+    if (inc_ods_ds.equals(ads_ds)) {
+      ads_ds = getSecondLastPartitionOrElse(ads_tab, null)
+      if (ads_ds == null) {
+        all()
+        return
+      }
+    }
+
+    sql(
+      s"""
+         |INSERT ${if (isWindows) "INTO" else "OVERWRITE"} TABLE $inc_ads_tab PARTITION(ds='$inc_ods_ds')
+         |SELECT  ${getColumns(inc_ads_tab).diff(Seq("ds")).mkString(",")}
+         |FROM    (
+         |            SELECT  *
+         |                    ,ROW_NUMBER() OVER(PARTITION BY company_id ORDER BY ds DESC) AS num
+         |            FROM    (
+         |                        SELECT  *
+         |                        FROM    $inc_ods_tab
+         |                        WHERE   ds > $ads_ds
+         |                    ) AS t1
+         |        ) AS t2
+         |WHERE   t2.num = 1
+         |""".stripMargin)
+  }
+
+  def calc(): Unit = {
+    val ads_ds = getLastPartitionsOrElse(ads_tab, null)
+    if (ads_ds == null)
+      all()
+    else
+      inc()
+  }
+
+}
+
+object inc_company_ng {
+  def main(args: Array[String]): Unit = {
+    val project = "winhc_ng"
+    val config = EsConfig.getEsConfigMap ++ mutable.Map(
+      "spark.hadoop.odps.project.name" -> project,
+      "spark.debug.maxToStringFields" -> "200",
+      "spark.hadoop.odps.spark.local.partition.amt" -> "1000"
+    )
+    val spark = SparkUtils.InitEnv(this.getClass.getSimpleName, config)
+    inc_company_ng(s = spark, project = project).calc()
+
+    spark.stop()
+  }
+}

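inc() carries a rerun guard: when the newest ads partition already equals the newest inc_ods partition (today's output was already written), the job steps back to the second-to-last ads partition, and falls back to a full all() rebuild if none exists. A compact sketch of that decision (names assumed from the diff):

    // Hedged sketch of the baseline-partition choice in inc().
    def baselineDs(lastAds: String, lastIncOds: String, secondLastAds: String): Option[String] =
      if (lastIncOds == lastAds) Option(secondLastAds) // rerun: step back one partition
      else Option(lastAds)                             // normal incremental case
    // None corresponds to the branch that gives up and calls all().
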
+ 15 - 3
src/main/scala/com/winhc/bigdata/spark/utils/DateUtils.scala

@@ -62,7 +62,19 @@ object DateUtils {
    * @param date
    * @return
    */
-  def getNotNullStr(date: String*): String = date.filter(_ != null).head
+  def getNotNullStr(date: String*): String = {
+    if (date == null || date.isEmpty) {
+      return null
+    }
+    try {
+      date.filter(_ != null).head
+    } catch {
+      case _: Exception => null
+    }
+  }
 
   /**
   * Get the smallest date; if anything abnormal occurs, return the first date
@@ -151,8 +163,8 @@ object DateUtils {
   }
 
   def main(args: Array[String]): Unit = {
-    println(DateUtils.toMillisTimestamp(date = "2020-09-17 18:02:02"))
-    println(System.currentTimeMillis())
+    //    println(DateUtils.toMillisTimestamp(date = "2020-09-17 18:02:02"))
+    println(getNotNullStr(null, null))
     //    println(getNotNullStr(null, "2003-10-12 10:00:00", null, "2003-11-12 00:00:02"))
   }