Преглед изворни кода

fix: 调整company_dishonest_info_person

许家凯 пре 4 године
родитељ
комит
79489db316

+ 4 - 4
src/main/scala/com/winhc/bigdata/spark/jobs/deadbeat/deadbeat_info.scala

@@ -25,7 +25,7 @@ case class deadbeat_info(s: SparkSession,
 
   private val filter_ele = Seq(
     "company_dishonest_info"
-    , "company_dishonest_info_human"
+    , "company_dishonest_info_person"
   )
 
   private def is_con(s: String): Boolean = {
@@ -36,11 +36,11 @@ case class deadbeat_info(s: SparkSession,
   }
 
   private val m = Map("company_dishonest_info" -> "失信人"
-    , "company_dishonest_info_human" -> "失信人"
+    , "company_dishonest_info_person" -> "失信人"
   )
 
   private val ids_m = Map("company_dishonest_info" -> "1"
-    , "company_dishonest_info_human" -> "2")
+    , "company_dishonest_info_person" -> "2")
 
   class person_agg_label extends UserDefinedAggregateFunction {
 
@@ -210,7 +210,7 @@ case class deadbeat_info(s: SparkSession,
     //参与预处理的表
     val mapTables = new mutable.HashMap[String, (String, String, String, String, String, String, String, String)]()
     mapTables("company_zxr") = ("rowkey", "cname", "card", "case_create_time", "deleted", "case_no", "court", "1")
-    mapTables("company_dishonest_info") = ("rowkey", "name", "card_num", "reg_time", "deleted", "case_no", "court", "2")
+    mapTables("company_dishonest_info") = ("rowkey", "name", "card_num", "pub_date", "deleted", "case_no", "court", "2")
     mapTables("company_zxr_final_case") = ("rowkey", "name", "identity_num", "case_create_time", "deleted", "case_no", "court_name", "3")
     mapTables("company_zxr_restrict") = ("rowkey", "name", "identity_num", "case_create_time", "deleted", "case_no", "court_name", "4")
     is_id_card()

+ 1 - 1
src/main/scala/com/winhc/bigdata/spark/jobs/deadbeat/dishonest_info.scala

@@ -21,7 +21,7 @@ case class dishonest_info(s: SparkSession,
 
   private def calc(is_person: Boolean = true): Unit = {
     val tn = is_person match {
-      case true => "company_dishonest_info_human"
+      case true => "company_dishonest_info_person"
       case false => "company_dishonest_info"
     }
     val cid = is_person match {

+ 2 - 2
src/main/scala/com/winhc/bigdata/spark/jobs/judicial/JudicialCaseRelationPre39.scala

@@ -22,7 +22,7 @@ case class JudicialCaseRelationPre39(s: SparkSession,
   private val target_table = "ads_judicial_case_relation_pre"
 
   val flag_map = Map("company_dishonest_info" -> "3"
-    , "company_dishonest_info_human" -> "9")
+    , "company_dishonest_info_person" -> "9")
 
 
   def company_dishonest_info(is_inc: Boolean = true): Unit = {
@@ -146,7 +146,7 @@ case class JudicialCaseRelationPre39(s: SparkSession,
 
     }
 
-    for (t <- Seq("company_dishonest_info", "company_dishonest_info_human"))
+    for (t <- Seq("company_dishonest_info", "company_dishonest_info_person"))
       if (is_inc)
         inc(t)
       else

+ 2 - 1
src/main/scala/com/winhc/bigdata/spark/utils/DataTypeUtils.scala

@@ -1,6 +1,6 @@
 package com.winhc.bigdata.spark.utils
 
-import org.apache.spark.sql.types.{DataType, DoubleType, LongType, StringType, TimestampType}
+import org.apache.spark.sql.types.{DataType, DoubleType, LongType, NullType, StringType, TimestampType}
 
 /**
  * @Author: XuJiakai
@@ -22,6 +22,7 @@ object DataTypeUtils {
       case DoubleType=>{
         "DOUBLE"
       }
+      case NullType => "string"
       case _ => {
         println(s"other type:${dataType.typeName}")
         throw new RuntimeException