Merge remote-tracking branch 'origin/master'

# Conflicts:
#	src/main/scala/com/winhc/bigdata/spark/jobs/dynamic/CompanyDynamicHandle.scala

许家凯 4 years ago · commit b70f5857f5

+ 3 - 0
src/main/scala/com/winhc/bigdata/spark/jobs/chance/ChangeExtract.scala

@@ -252,6 +252,9 @@ object ChangeExtract {
     , Args(tableName = "company_employment", primaryFields = "source")
     , Args(tableName = "company_env_punishment", primaryFields = "punish_number")
     , Args(tableName = "company_icp", primaryFields = "domain")
+    , Args(tableName = "company_punishment_info", primaryFields = "punish_number")
+    , Args(tableName = "company_punishment_info_creditchina", primaryFields = "punish_number")
+    , Args(tableName = "bankruptcy_open_case", primaryFields = "case_no")//破产重整
 
     , Args(tableName = "company_certificate", primaryFields = "type")
     , Args(tableName = "company_abnormal_info", primaryFields = "remove_reason")

+ 28 - 0
src/main/scala/com/winhc/bigdata/spark/jobs/chance/table/bankruptcy_open_case.scala

@@ -0,0 +1,28 @@
+
+package com.winhc.bigdata.spark.jobs.chance.table
+
+import com.winhc.bigdata.spark.jobs.chance.CompanyChangeHandle
+import com.winhc.bigdata.spark.utils.ChangeExtractUtils
+
+/**
+ * @Author: Yan Yongnian
+ * @Date: 2020/8/5
+ * @Description:
+ */
+
+
+// bankruptcy announcement
+
+case class bankruptcy_open_case(equCols: Seq[String]) extends CompanyChangeHandle with Serializable {
+
+  override def getLabel(oldMap: Map[String, String], newMap: Map[String, String]): String =
+    ChangeExtractUtils.getTags(newMap, "破产重整", Array("case_no", "case_type", "agency_court", "applicant", "respondent", "public_date"))
+
+  override def getBizTime(newMap: Map[String, String]): String = newMap("public_date")
+
+  override def getUpdateTitle(newMap: Map[String, String]): String = getValueOrNull(newMap("case_no"), s"${newMap("case_no")}破产重整发生变更")
+
+  override def getInsertTitle(newMap: Map[String, String]): String = getValueOrNull(newMap("case_no"), s"新增${newMap("case_no")}破产重整")
+}
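
A quick behavioral sketch of the new handle on an invented row; the CompanyChangeHandle contract (in particular getValueOrNull) is assumed, since it is not shown in this diff:

object BankruptcyHandleSketch {
  // Invented sample row; keys mirror the Array passed to getTags above.
  val row = Map("case_no" -> "(2020)沪03破1号", "public_date" -> "2020-08-05")

  // Assuming the trait's getValueOrNull(v, s) yields s when v is present:
  //   bankruptcy_open_case(Seq("case_no")).getBizTime(row)     == "2020-08-05"
  //   bankruptcy_open_case(Seq("case_no")).getInsertTitle(row) == "新增(2020)沪03破1号破产重整"
}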

+ 2 - 0
src/main/scala/com/winhc/bigdata/spark/jobs/dynamic/CompanyDynamicHandle.scala

@@ -42,6 +42,7 @@ trait CompanyDynamicHandle {
     , "" -> "investor_equity_change" //大股东变更
     , "" -> "actual_controller_change" //实际控制人变更
     , "" -> "court_notice" //开庭公告
+    , "bankruptcy_open_case" -> "bankruptcy_open_case" //破产重整
 
     , "company_staff" -> "company_staff" //主要成员
   )
@@ -90,6 +91,7 @@ trait CompanyDynamicHandle {
     , "" -> "36" // 限制高消费
     , "" -> "37" // 被执行人
     , "" -> "38" // 送达报告
+    , "bankruptcy_open_case" -> "39" // 破产重整
   )
 
   /**
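
These two hunks wire the new table into CompanyDynamicHandle's lookup tables: source table to dynamic type, and source table to info-type code "39". Reduced to the pairs this commit adds (the map and helper names below are assumptions):

object DynamicMappingSketch {
  // Only the entries added by this commit; the real maps carry many more.
  val tableToDynamicType  = Map("bankruptcy_open_case" -> "bankruptcy_open_case")
  val tableToInfoTypeCode = Map("bankruptcy_open_case" -> "39")

  // Presumed resolution by source table name:
  def infoTypeFor(table: String): Option[String] = tableToInfoTypeCode.get(table)
}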

+ 51 - 0
src/main/scala/com/winhc/bigdata/spark/jobs/dynamic/tables/bankruptcy_open_case.scala

@@ -0,0 +1,51 @@
+package com.winhc.bigdata.spark.jobs.dynamic.tables
+
+import com.winhc.bigdata.spark.jobs.dynamic.CompanyDynamicHandle
+
+/**
+ * @Author yyn
+ * @Date 2020/8/5
+ * @Description TODO
+ */
+// bankruptcy announcement
+case class bankruptcy_open_case() extends CompanyDynamicHandle {
+  /**
+   * Information description
+   *
+   * @param old_map
+   * @param new_map
+   * @return
+   */
+  override def get_rta_desc(old_map: Map[String, String], new_map: Map[String, String]): String = new_map("case_no")
+
+  /**
+   * Change content
+   *
+   * @param old_map
+   * @param new_map
+   * @return
+   */
+  override def get_change_content(old_map: Map[String, String], new_map: Map[String, String], cname: String = null): String = {
+    s"""案号:${new_map("case_no")}\n
+       |被申请人:${new_map("respondent")}\n
+       |申请人:${new_map("applicant")}\n
+       |公开日期:${new_map("public_date")}\n""".stripMargin
+  }
+
+  /**
+   * Change time
+   *
+   * @param new_map
+   * @return
+   */
+//  override def get_change_time(new_map: Map[String, String]): String = new_map("biz_date")
+
+  /**
+   * Risk level
+   *
+   * @param old_map
+   * @param new_map
+   * @return
+   */
+  override def get_info_risk_level(old_map: Map[String, String], new_map: Map[String, String]): String = "高风险"
+}
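
One note on the interpolation fixed above: in Scala, $new_map("case_no") splices only new_map (the whole Map's toString) and leaves ("case_no") as literal text, so the braced ${new_map("case_no")} form is required. A sketch of what the corrected method renders for an invented row:

object ChangeContentSketch {
  val m = Map("case_no" -> "(2020)沪03破1号", "respondent" -> "甲公司",
    "applicant" -> "乙公司", "public_date" -> "2020-08-05")
  // bankruptcy_open_case().get_change_content(null, m) renders one labeled line
  // per field; each source line also carries an explicit \n, so the output has
  // a blank line between entries:
  //   案号:(2020)沪03破1号
  //
  //   被申请人:甲公司
  //   ...
}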

+ 26 - 10
src/main/scala/com/winhc/bigdata/spark/utils/CompanyIncrForCidsUtils.scala

@@ -18,18 +18,34 @@ case class CompanyIncrForCidsUtils(s: SparkSession,
                                    mainTableName: String, // main table name (without prefix)
                                    sublistTableName: String, // sub-list table (without prefix)
                                    dupliCols: Seq[String] // dedup columns
-                                  ) extends LoggingUtils  with CompanyMapping{
+                                  ) extends LoggingUtils with CompanyMapping {
   @(transient@getter) val spark: SparkSession = s
 
   val tabMapping =
     Map("company_court_open_announcement" -> ("litigant_cids", ";") //开庭公告
     )
 
+  val funMap =
+    Map("company_court_announcement.case_no" -> "case_no(content)", //法院公告
+      "company_court_announcement.plaintiff" -> "replace_char(plaintiff)",
+      "company_court_announcement.litigant" -> "replace_char(litigant)"
+    )
+
+  // transform a column: swap in the mapped SQL expression when funMap has an
+  // entry for "<mainTableName>.<column>"; otherwise return the column unchanged
+  def trans(s: String): String = funMap.getOrElse(mainTableName + "." + s, s)
+
   def calc(): Unit = {
     println(s"${this.getClass.getSimpleName} calc start! " + new Date().toString)
 
     prepareFunctions(spark)
-    val (split_cols,delimiter) = tabMapping.getOrElse(mainTableName,("cids",";"))
+    val (split_cols, delimiter) = tabMapping.getOrElse(mainTableName, ("cids", ";"))
 
    val inc_ods_company = s"${project}.inc_ods_company" // daily incremental company base info
    val ads_company_tb = s"${project}.ads_$mainTableName" // existing ADS main-table data
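
The funMap/trans pair above substitutes SQL expressions for raw column names, keyed by "table.column". A behavioral sketch; the semantics of the case_no and replace_char functions are assumptions (presumably UDFs registered by prepareFunctions):

// Behavior of trans when mainTableName == "company_court_announcement":
//   trans("case_no")   => "case_no(content)"
//   trans("plaintiff") => "replace_char(plaintiff)"
//   trans("ds")        => "ds"   (no funMap entry: returned unchanged)
// For any other main table no key matches, so trans is the identity everywhere.
// Note the mapped expressions carry no AS alias, so downstream column binding
// presumably happens by position.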
@@ -39,17 +55,17 @@ case class CompanyIncrForCidsUtils(s: SparkSession,
    val inc_ads_company_tb_list = s"${project}.inc_ads_$sublistTableName" // incremental ADS sub-table data
 
     val sublistTableFieldName = spark.table(ads_company_tb_list).columns.filter(s => {
-      !s.equals("ds") && !s.equals("new_cid") && !s.equals("rowkey") && !s.equals("cids") && !s.equals("new_cids")&& !s.equals("cid")
+      !s.equals("ds") && !s.equals("new_cid") && !s.equals("rowkey") && !s.equals("cids") && !s.equals("new_cids") && !s.equals("cid")
     }).seq
 
     val sublistRes = spark.table(inc_ads_company_tb_list).columns.filter(s => {
       !s.equals("ds")
-    }).seq
+    }).map(trans).seq
 
 
     val subRes = spark.table(inc_ads_company_tb).columns.filter(s => {
       !s.equals("ds")
-    }).seq
+    }).map(trans).seq
 
    // latest partition of the existing ADS table
     val remainDs = BaseUtil.getPartion(ads_company_tb, spark)
@@ -71,7 +87,7 @@ case class CompanyIncrForCidsUtils(s: SparkSession,
       runDs = BaseUtil.atDaysAfter(1, lastDsIncAds)
     }
 
-    val cols_md5 = dupliCols.filter(!_.equals("new_cid"))
+    val cols_md5 = dupliCols.filter(!_.equals("new_cid")).map(trans)
 
    // if the last partitions of incremental ODS and incremental ADS match, bail out
     if (lastDsIncOds.equals(lastDsIncAds)) {
@@ -96,8 +112,8 @@ case class CompanyIncrForCidsUtils(s: SparkSession,
     })
 
    // check for a duplicate field name; fall back to update_flag if flag is taken
-    var f= "flag"
-    if(sublistTableFieldName.contains(f)){
+    var f = "flag"
+    if (sublistTableFieldName.contains(f)) {
       f = "update_flag"
     }
 
@@ -136,7 +152,7 @@ case class CompanyIncrForCidsUtils(s: SparkSession,
          |                    ,new_cid
          |                    ,cid
          |                    ,${sublistTableFieldName.mkString(",")}
-         |                    ,ROW_NUMBER() OVER (PARTITION BY cleanup(CONCAT_WS('',${dupliCols.mkString(",")})) ORDER BY update_time DESC ) num
+         |                    ,ROW_NUMBER() OVER (PARTITION BY cleanup(CONCAT_WS('',${dupliCols.map(trans).mkString(",")})) ORDER BY update_time DESC ) num
          |            FROM    (
          |                        SELECT  "0" AS $f
          |                                ,CAST(new_cid AS STRING) AS new_cid
@@ -189,7 +205,7 @@ case class CompanyIncrForCidsUtils(s: SparkSession,
          |SELECT  ${subRes.mkString(",")}
          |FROM    (
          |            SELECT  $split_cols,${columns.mkString(",")}
-         |                    ,ROW_NUMBER() OVER (PARTITION BY id ORDER BY update_time DESC ) num
+         |                    ,ROW_NUMBER() OVER (PARTITION BY cleanup(CONCAT_WS('',${cols_md5.mkString(",")})) ORDER BY update_time DESC ) num
          |                    ,md5(cleanup(CONCAT_WS('',${cols_md5.mkString(",")}))) AS rowkey
          |            FROM    ${inc_ods_company_tb}
          |            WHERE   ds >= ${runDs}
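
The final hunk repartitions the dedup window on the same cleaned concatenation that feeds the md5 rowkey, so the latest record per rowkey survives rather than the latest per id. A sketch of the generated fragment under hypothetical inputs:

// With dupliCols = Seq("new_cid", "case_no") and no funMap hit,
// cols_md5 == Seq("case_no"), and the generated SQL contains:
//   ROW_NUMBER() OVER (PARTITION BY cleanup(CONCAT_WS('', case_no)) ORDER BY update_time DESC) num
//   md5(cleanup(CONCAT_WS('', case_no))) AS rowkey
// i.e. the window key and the rowkey now agree, and (presumably) only num = 1
// rows are written downstream.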