
fix: refine the new-pipeline jobs (新流程任务完善)

许家凯 3 years ago
parent commit b0f98fb087

+ 0 - 1
src/main/scala/com/winhc/bigdata/spark/ng/jobs/args_company_job.scala

@@ -13,7 +13,6 @@ case class args_company_job(tableName: String
                             , where: String = "" // filter condition applied to the ODS-layer data
                             , id_user_defined_rowkey: Boolean = false // whether to read the user-defined rowkey from the ODS layer
                             , explode_args: Seq[explode_args] = null // configure this if the table needs to be exploded
-                            , export_2_es_fields: Seq[String] = null
                            )

 object args_company_job {

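For context, a hedged sketch of configuring args_company_job after this change: the export_2_es_fields parameter is gone, so callers only pass the remaining fields. All concrete values below are invented, and any constructor parameters not visible in this hunk are assumed to have defaults.

  // Hypothetical configuration; only parameters visible in this hunk are used.
  val job = args_company_job(
    tableName = "company_holder",       // hypothetical ODS table name
    where = "deleted = 0",              // filter condition on the ODS-layer data
    id_user_defined_rowkey = false,     // do not read the user-defined rowkey
    explode_args = null                 // no explode configuration for this table
  )
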
+ 3 - 3
src/main/scala/com/winhc/bigdata/spark/ng/utils/CompanySummaryNg_new.scala

@@ -22,9 +22,9 @@ case class company_summary_args(table_name: String
                                 , groupByInfo: GroupByInfoNg = null
                                 , where: String = ""
                                 , sortField: String = "ds"
-                               ){
-  def winhc_hash(): String ={
-    BaseUtil.cleanup(s"${table_name}_${companyIdField}").replaceAll("[\\(\\)() ]","")
+                               ) {
+  def winhc_hash(): String = {
+    BaseUtil.cleanup(s"${table_name}_${companyIdField}").replaceAll("[\\(\\)() ]", "")
   }
 }

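The edit above is formatting only; winhc_hash behaves as before. A hedged usage sketch follows. companyIdField is referenced in the method body but declared outside this hunk, so its name here and the sample values are assumptions.

  // Hypothetical instantiation; table_name and companyIdField values are invented.
  val args = company_summary_args(
    table_name = "company_staff",
    companyIdField = "company_id"
  )
  // BaseUtil.cleanup normalizes "company_staff_company_id", and the replaceAll then
  // strips parentheses and spaces, yielding a compact key for the summary job.
  val hashKey: String = args.winhc_hash()
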
+ 2 - 0
src/main/scala/com/winhc/bigdata/spark/ng/utils/explode_tab.scala

@@ -14,6 +14,8 @@ import scala.collection.mutable
 /**
  * @author: XuJiakai
  * @date: 2021/2/24 10:18
+ *
+ *        Multi-table explode
  */
 case class explode_args(org_field: String, json_path: String, alias: String, is_filter_null: Boolean = false)

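The new doc comment marks this file as the multi-table explode helper. Below is a hedged example of the explode_args case class defined above; every value is invented to show the shape of the configuration, not taken from the repository.

  // Hypothetical explode configuration: take the JSON array column "punish_info",
  // apply the JSON path "$.punish_reason" to each exploded element, expose the
  // result as "punish_reason", and drop rows where the extracted value is null.
  val cfg = explode_args(
    org_field = "punish_info",
    json_path = "$.punish_reason",
    alias = "punish_reason",
    is_filter_null = true
  )
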
+ 2 - 0
src/main/scala/com/winhc/bigdata/spark/ng/utils/merge_table.scala

@@ -11,6 +11,8 @@ import scala.collection.mutable
 /**
  * @author: XuJiakai
  * @date: 2021/2/23 16:20
+ *
+ *        Multi-table merge
  */
 case class merge_table(s: SparkSession,
                        tables: Seq[String], // names of the tables to merge

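As with explode_tab, the added comment documents the intent: merging several tables. A hedged sketch of the only two constructor parameters visible in this hunk; the real case class very likely takes further arguments, which are omitted here.

  // Hypothetical table list; a SparkSession named `spark` is assumed to exist.
  val tablesToMerge: Seq[String] = Seq("ads_company_change", "ads_company_staff")
  // val job = merge_table(spark, tablesToMerge /*, remaining arguments omitted */)
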
+ 15 - 5
src/main/scala/com/winhc/bigdata/spark/utils/BaseUtil.scala

@@ -1,16 +1,17 @@
 package com.winhc.bigdata.spark.utils

-import java.security.MessageDigest
-import java.text.SimpleDateFormat
-import java.util.regex.{Matcher, Pattern}
-import java.util.{Calendar, Date, Locale}
-
 import cn.hutool.core.util.StrUtil
+import com.alibaba.fastjson.JSON
 import com.winhc.bigdata.spark.implicits.RegexUtils._
 import org.apache.commons.lang3.StringUtils
 import org.apache.commons.lang3.time.DateFormatUtils
 import org.apache.spark.sql.SparkSession

+import java.security.MessageDigest
+import java.text.SimpleDateFormat
+import java.util.regex.Pattern
+import java.util.{Calendar, Date, Locale}
+
 /**
  * @Author: XuJiakai
  * @Date: 2020/6/3 18:49
@@ -28,6 +29,15 @@ object BaseUtil {
       pattern replaceAllIn(s, "")
   }

+  def is_json_str(str: String): Boolean = {
+    try {
+      JSON.parse(str)
+      true
+    } catch {
+      case exception: Exception => false
+    }
+  }
+
   def isWindows: Boolean = System.getProperty("os.name").contains("Windows")

   def getPartitions(t: String, @transient spark: SparkSession): Seq[String] = {
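
The new helper relies on fastjson's JSON.parse throwing on malformed input. A hedged usage sketch follows; note that fastjson also parses bare scalars, so a string like "123" would likely count as JSON here as well.

  import com.winhc.bigdata.spark.utils.BaseUtil

  BaseUtil.is_json_str("""{"name": "winhc"}""")  // true: a JSON object parses
  BaseUtil.is_json_str("[1, 2, 3]")              // true: a JSON array parses
  BaseUtil.is_json_str("{")                      // false: parse throws, caught and mapped to false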