
fix: add a default empty inc_ads partition

许家凯 3 years ago
Parent commit c6dd75a802

+ 2 - 0
src/main/scala/com/winhc/bigdata/spark/ng/jobs/CompanyIndexJob.scala

@@ -279,6 +279,8 @@ case class CompanyIndexJob(s: SparkSession,
          |    ds='$target_ds'
          |""".stripMargin)
 
+    addEmptyPartitionOrSkip(target_tab, target_ds)
+    addEmptyPartitionOrSkip(target_tab_simp, target_ds)
   }
 
   private def tab_verify(out_f: Seq[String], tab: String, ignore_f: Seq[String] = Seq("ds")): Unit = {

+ 2 - 0
src/main/scala/com/winhc/bigdata/spark/ng/jobs/general_handler.scala

@@ -151,6 +151,8 @@ case class general_handler(s: SparkSession,
          |        ) AS t2
          |WHERE   t2.num = 1
          |""".stripMargin)
+
+    addEmptyPartitionOrSkip(inc_ads_tab, target_ds)
   }
 
 

+ 1 - 0
src/main/scala/com/winhc/bigdata/spark/ng/jobs/inc_company_ng.scala

@@ -123,6 +123,7 @@ case class inc_company_ng(s: SparkSession,
          |        ) AS t2
          |WHERE   t2.num = 1
          |""".stripMargin)
+    addEmptyPartitionOrSkip(inc_ads_tab, target_ds)
   }
 
   def calc(): Unit = {

+ 7 - 0
src/main/scala/com/winhc/bigdata/spark/udf/BaseFunc.scala

@@ -27,6 +27,13 @@ trait BaseFunc {
      })
    }*/
 
+  def addEmptyPartitionOrSkip(tab: String, ds: String): Unit = {
+    spark.sql(
+      s"""
+         |ALTER TABLE $tab ADD IF NOT EXISTS PARTITION(ds='$ds')
+         |""".stripMargin)
+  }
+
   def case_no_trim_udf(): Unit = {
     spark.udf.register("case_no_trim", case_no_trim _)
   }