
Merge branch 'dev_david' of anweiguo/algorithm_platform into dev_awg

liudawei 3 months ago
parent
commit
406e95b454

+ 3 - 3
models_processing/model_koi/losses.py

@@ -56,8 +56,8 @@ class SouthLoss(Loss):
                  name: str = "south_loss",
                  reduction: str = "sum_over_batch_size"):
         # Parameter validation
-        if not 0 <= cap <= 1:
-            raise ValueError("cap must be a normalized value within the [0, 1] range")
+        # if not 0 <= cap <= 1:
+        #     raise ValueError("cap must be a normalized value within the [0, 1] range")
 
         super().__init__(name=name, reduction=reduction)
 
@@ -144,7 +144,7 @@ region_loss_d = {
 
 # Call the corresponding loss logic based on the region
 def region_loss(opt):
-    handler = region_loss_d.get(opt.region, opt.region)
+    handler = region_loss_d.get(opt.region, 'northeast')
     # Determine the handler type and execute it
     if callable(handler):
         # If it is a lambda or a function, call it directly
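
The default handler for unknown regions is now the literal string 'northeast' instead of the requested region name itself. A minimal sketch of that dispatch pattern, using a hypothetical region_loss_d (the real table and the non-callable branch are not part of this diff):

from types import SimpleNamespace

# Hypothetical dispatch table; the real handlers live elsewhere in losses.py.
region_loss_d = {
    'south': lambda opt: f'south_loss(cap={opt.cap})',  # callable handler
    'northeast': 'mse',                                 # plain string handler
}

def region_loss(opt):
    # Unknown regions now resolve to the string 'northeast'
    # rather than echoing opt.region back as the handler.
    handler = region_loss_d.get(opt.region, 'northeast')
    if callable(handler):
        return handler(opt)   # lambdas/functions are invoked directly
    return handler            # strings fall through to the non-callable branch

print(region_loss(SimpleNamespace(region='south', cap=0.9)))      # -> south_loss(cap=0.9)
print(region_loss(SimpleNamespace(region='northeast', cap=0.9)))  # -> mse
print(region_loss(SimpleNamespace(region='unknown', cap=0.9)))    # -> northeast (the default string)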

+ 3 - 2
models_processing/model_koi/tf_bp_pre.py

@@ -23,7 +23,8 @@ np.random.seed(42)  # NumPy random seed
 app = Flask('tf_bp_pre——service')
 
 with app.app_context():
-    with open('./models_processing/model_koi/bp.yaml', 'r', encoding='utf-8') as f:
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    with open(os.path.join(current_dir, 'bp.yaml'), 'r', encoding='utf-8') as f:
         args = yaml.safe_load(f)
     dh = DataHandler(logger, args)
     bp = BPHandler(logger, args)
@@ -74,7 +75,7 @@ def model_prediction_bp():
         pre_data = pre_data[res_cols]
 
         pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
-        pre_data.loc[pre_data['power_forecast'] > g.opt.cap, 'power_forecast'] = g.opt.cap
+        pre_data.loc[pre_data['power_forecast'] > args['cap'], 'power_forecast'] = args['cap']
         pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
 
         insert_data_into_mongo(pre_data, args)
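
Every service touched by this commit switches from a CWD-relative YAML path to one anchored at the module's own directory, so the Flask apps load their config no matter where they are launched from. A small sketch of the pattern as a reusable helper (the helper name is hypothetical; the services inline the two lines instead):

import os
import yaml

def load_service_config(filename):
    """Load a YAML config that sits next to this module, independent of the CWD."""
    current_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(current_dir, filename), 'r', encoding='utf-8') as f:
        return yaml.safe_load(f)

# e.g. args = load_service_config('bp.yaml')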

+ 2 - 1
models_processing/model_koi/tf_bp_train.py

@@ -21,7 +21,8 @@ np.random.seed(42)  # NumPy random seed
 app = Flask('tf_bp_train——service')
 
 with app.app_context():
-    with open('./models_processing/model_koi/bp.yaml', 'r', encoding='utf-8') as f:
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    with open(os.path.join(current_dir, 'bp.yaml'), 'r', encoding='utf-8') as f:
         args = yaml.safe_load(f)
     dh = DataHandler(logger, args)
     bp = BPHandler(logger, args)

+ 4 - 3
models_processing/model_koi/tf_cnn_pre.py

@@ -12,7 +12,7 @@ from common.database_dml import *
 from common.processing_data_common import missing_features, str_to_list
 from data_processing.data_operation.data_handler import DataHandler
 from threading import Lock
-import time, yaml
+import time, yaml, os
 model_lock = Lock()
 from itertools import chain
 from common.logs import Log
@@ -24,7 +24,8 @@ np.random.seed(42)  # NumPy random seed
 app = Flask('tf_cnn_pre——service')
 
 with app.app_context():
-    with open('./models_processing/model_koi/bp.yaml', 'r', encoding='utf-8') as f:
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    with open(os.path.join(current_dir, 'cnn.yaml'), 'r', encoding='utf-8') as f:
         args = yaml.safe_load(f)
 
     dh = DataHandler(logger, args)
@@ -73,7 +74,7 @@ def model_prediction_bp():
         pre_data = pre_data[res_cols]
 
         pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
-        pre_data.loc[pre_data['power_forecast'] > g.opt.cap, 'power_forecast'] = g.opt.cap
+        pre_data.loc[pre_data['power_forecast'] > args['cap'], 'power_forecast'] = args['cap']
         pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
 
         insert_data_into_mongo(pre_data, args)
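
In the prediction services the clamp ceiling now comes from the YAML-loaded args['cap'] instead of the per-request g.opt, and tf_cnn_pre.py additionally reads cnn.yaml rather than bp.yaml. The two .loc assignments are equivalent to a single pandas clip; a small standalone illustration (the values are made up):

import pandas as pd

pre_data = pd.DataFrame({'power_forecast': [-0.4, 12.346, 55.0]})
cap = 50.0  # stands in for args['cap'] read from the YAML config

pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
# Same effect as the two .loc assignments in the diff: clamp into [0, cap].
pre_data['power_forecast'] = pre_data['power_forecast'].clip(lower=0, upper=cap)
print(pre_data['power_forecast'].tolist())  # [0.0, 12.35, 50.0]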

+ 2 - 1
models_processing/model_koi/tf_cnn_train.py

@@ -21,7 +21,8 @@ np.random.seed(42)  # NumPy random seed
 app = Flask('tf_cnn_train——service')
 
 with app.app_context():
-    with open('./models_processing/model_koi/cnn.yaml', 'r', encoding='utf-8') as f:
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    with open(os.path.join(current_dir, 'cnn.yaml'), 'r', encoding='utf-8') as f:
         args = yaml.safe_load(f)
 
     dh = DataHandler(logger, args)

+ 3 - 2
models_processing/model_koi/tf_lstm_pre.py

@@ -24,7 +24,8 @@ np.random.seed(42)  # NumPy random seed
 app = Flask('tf_lstm_pre——service')
 
 with app.app_context():
-    with open('./models_processing/model_koi/bp.yaml', 'r', encoding='utf-8') as f:
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    with open(os.path.join(current_dir, 'lstm.yaml'), 'r', encoding='utf-8') as f:
         args = yaml.safe_load(f)
 
     dh = DataHandler(logger, args)
@@ -72,7 +73,7 @@ def model_prediction_bp():
         pre_data = pre_data[res_cols]
 
         pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
-        pre_data.loc[pre_data['power_forecast'] > g.opt.cap, 'power_forecast'] = g.opt.cap
+        pre_data.loc[pre_data['power_forecast'] > args['cap'], 'power_forecast'] = args['cap']
         pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
 
         insert_data_into_mongo(pre_data, args)

+ 3 - 1
models_processing/model_koi/tf_lstm_train.py

@@ -19,7 +19,8 @@ np.random.seed(42)  # NumPy random seed
 app = Flask('tf_lstm_train——service')
 
 with app.app_context():
-    with open('./models_processing/model_koi/lstm.yaml', 'r', encoding='utf-8') as f:
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    with open(os.path.join(current_dir, 'lstm.yaml'), 'r', encoding='utf-8') as f:
         args = yaml.safe_load(f)
 
     dh = DataHandler(logger, args)
@@ -41,6 +42,7 @@ def model_training_bp():
     # Record the program start time
     start_time = time.time()
     result = {}
+    success = 0
     print("Program starts execution!")
     try:
         # ------------ Fetch data and preprocess the training data ------------
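
tf_lstm_train.py also pre-initializes success = 0 before the try block. The rest of the handler is not shown in this diff, but the usual motivation is that the exception/response path reads success later; a sketch under that assumption (the run_training callable and the result keys are illustrative):

import time

def model_training_sketch(run_training):
    start_time = time.time()
    result = {}
    success = 0  # pre-initialize so the code after the try block can always read it
    try:
        run_training()  # stands in for the data fetch + training steps
        success = 1
    except Exception as e:
        result['error'] = str(e)
    # Without the pre-initialization, reaching this point after an early exception
    # would raise UnboundLocalError when 'success' is read.
    result['success'] = success
    result['elapsed'] = round(time.time() - start_time, 3)
    return result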