David · 1 month ago
commit e0d170c398

+ 45 - 20
data_processing/data_operation/data_handler.py

@@ -24,9 +24,9 @@ class DataHandler(object):
             if len(df) < self.opt.Model["time_step"]:
                 self.logger.info("Feature processing - training data - shorter than time_step")
             if lstm_type == 2:
-                datax, datay = self.get_timestep_features_lstm2(df, col_time, target, is_train=True)
+                datax, datay = self.get_timestep_features_lstm2(df, col_time, target, is_train=True, time_series=time_series)
             elif lstm_type == 3:
-                datax, datay = self.get_timestep_features_bilstm(df, col_time, target, is_train=True)
+                datax, datay = self.get_timestep_features_bilstm(df, col_time, target, is_train=True, time_series=time_series)
             else:
                 datax, datay = self.get_timestep_features(df, col_time, target, is_train=True, time_series=time_series)
             if len(datax) < 10:
@@ -46,17 +46,23 @@ class DataHandler(object):

         return train_x, valid_x, train_y, valid_y

-    def get_predict_data(self, dfs, time_series=1):
+    def get_predict_data(self, dfs, time_series=1, lstm_type=1):
         test_x = []
         for i, df in enumerate(dfs, start=1):
-            if len(df) < self.opt.Model["time_step"]:
+            if len(df) < self.opt.Model["time_step"]*time_series:
                 self.logger.info("特征处理-预测数据-不满足time_step")
                 self.logger.info("特征处理-预测数据-不满足time_step")
                 continue
                 continue
-            datax = self.get_predict_features(df, time_series)
+            if lstm_type == 2:
+                datax = self.get_predict_features2(df, time_series)
+            elif lstm_type == 3:
+                datax = self.get_predict_features3(df, time_series)
+            else:
+                datax = self.get_predict_features(df, time_series)
             test_x.append(datax)
         test_x = np.concatenate(test_x, axis=0)
         return test_x

+
     def get_predict_features(self, norm_data, time_series=1):
         """
         Split the data evenly to build the prediction set
@@ -74,6 +80,26 @@ class DataHandler(object):
             features_x = np.concatenate((features_x, np.expand_dims(df_repeated, 0)), axis=0)
         return features_x

+    def get_predict_features2(self, norm_data, time_series=2):
+        """
+        lstm2: slice data by time step to build the prediction set
+        """
+        time_step = self.opt.Model["time_step"]
+        feature_data = norm_data.reset_index(drop=True)
+        time_step_loc = time_step*time_series - 1
+        features_x = np.array([feature_data.loc[i:i + time_step_loc, self.opt.features].reset_index(drop=True) for i in range(0, len(norm_data) - time_step_loc, time_step)])
+        return features_x
+
+    def get_predict_features3(self, norm_data, time_series=3):
+        """
+        lstm3: slice data by time step to build the prediction set
+        """
+        time_step = self.opt.Model["time_step"]
+        feature_data = norm_data.reset_index(drop=True)
+        time_step_loc = time_step*time_series - 1
+        features_x = np.array([feature_data.loc[i:i + time_step_loc, self.opt.features].reset_index(drop=True) for i in range(0, len(norm_data) - time_step_loc, time_step*(time_series-2))])
+        return features_x
+
     def get_timestep_features(self, norm_data, col_time, target, is_train, time_series=1):
         """
         Split data by time step to build the time-series training set
@@ -92,37 +118,36 @@ class DataHandler(object):
             features_y.append(row[1])
         return features_x, features_y

-    def get_timestep_features_lstm2(self, norm_data, col_time, target, is_train):
+    def get_timestep_features_lstm2(self, norm_data, col_time, target, is_train, time_series=2):
         """
         """
-        步长分割数据,获取时序训练集
+        步长分割数据,获取最后一个时间步长作为训练集
         """
         """
         time_step = self.opt.Model["time_step"]
         feature_data = norm_data.reset_index(drop=True)
-        time_step_loc = time_step*2 - 1
+        time_step_loc = time_step*time_series - 1
         train_num = int(len(feature_data))
         label_features = [col_time, target] if is_train is True else [col_time, target]
         nwp_cs = self.opt.features
-        nwp = [feature_data.loc[i:i + time_step_loc, nwp_cs].reset_index(drop=True) for i in range(train_num - time_step*2 + 1)]  # database fields 'C_T': 'C_WS170'
-        labels = [feature_data.loc[i: i+time_step_loc, label_features].reset_index(drop=True) for i in range(train_num - time_step*2 + 1)]
+        nwp = [feature_data.loc[i:i + time_step_loc, nwp_cs].reset_index(drop=True) for i in range(train_num - time_step*time_series + 1)]  # database fields 'C_T': 'C_WS170'
+        labels = [feature_data.loc[i+time_step_loc-time_step+1: i+time_step_loc, label_features].reset_index(drop=True) for i in range(train_num - time_step*time_series + 1)]
         features_x, features_y = [], []
         for i, row in enumerate(zip(nwp, labels)):
             features_x.append(row[0])
             features_y.append(row[1])
         return features_x, features_y

-    def get_timestep_features_bilstm(self, norm_data, col_time, target, is_train):
+    def get_timestep_features_bilstm(self, norm_data, col_time, target, is_train, time_series=3):
         """
         """
-        步长分割数据,获取时序训练集
+        步长分割数据,获取中间的时间步长作为训练集
         """
         """
         time_step = self.opt.Model["time_step"]
         feature_data = norm_data.reset_index(drop=True)
-        time_step_loc = time_step*3 - 1
-        time_step_m = time_step*2 - 1
+        time_step_loc = time_step*time_series - 1
         train_num = int(len(feature_data))
         label_features = [col_time, target] if is_train is True else [col_time, target]
         nwp_cs = self.opt.features
-        nwp = [feature_data.loc[i:i + time_step_loc, nwp_cs].reset_index(drop=True) for i in range(train_num - time_step*3 + 1)]  # database fields 'C_T': 'C_WS170'
-        labels = [feature_data.loc[i: i+time_step_loc, label_features].reset_index(drop=True) for i in range(train_num - time_step*3 + 1)]
+        nwp = [feature_data.loc[i:i + time_step_loc, nwp_cs].reset_index(drop=True) for i in range(train_num - time_step*time_series + 1)]  # database fields 'C_T': 'C_WS170'
+        labels = [feature_data.loc[i+time_step: i+time_step_loc-time_step, label_features].reset_index(drop=True) for i in range(train_num - time_step*time_series + 1)]
         features_x, features_y = [], []
         for i, row in enumerate(zip(nwp, labels)):
             features_x.append(row[0])
@@ -205,7 +230,7 @@ class DataHandler(object):
                 vy.append(data[1])
         return tx, vx, ty, vy

-    def train_data_handler(self, data, bp_data=False, time_series=1):
+    def train_data_handler(self, data, bp_data=False, time_series=1, lstm_type=1):
         """
         """
         训练数据预处理:
         训练数据预处理:
         清洗+补值+归一化
         清洗+补值+归一化
@@ -257,10 +282,10 @@ class DataHandler(object):
             train_x, valid_x, train_y, valid_y = self.train_valid_split(train_data[self.opt.features].values, train_data[target].values, valid_rate=self.opt.Model["valid_data_rate"], shuffle=self.opt.Model['shuffle_train_data'])
             train_x, valid_x, train_y, valid_y = np.array(train_x), np.array(valid_x), np.array(train_y), np.array(valid_y)
         else:
-            train_x, valid_x, train_y, valid_y = self.get_train_data(train_datas, col_time, target, time_series)
+            train_x, valid_x, train_y, valid_y = self.get_train_data(train_datas, col_time, target, time_series, lstm_type)
         return train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap

-    def pre_data_handler(self, data, feature_scaler, bp_data=False, time_series=1):
+    def pre_data_handler(self, data, feature_scaler, bp_data=False, time_series=1, lstm_type=1):
         """
         """
         预测数据简单处理
         预测数据简单处理
         Args:
         Args:
@@ -286,5 +311,5 @@ class DataHandler(object):
         if bp_data:
             pre_x = np.array(pre_data)
         else:
-            pre_x = self.get_predict_data([pre_data], time_series)
+            pre_x = self.get_predict_data([pre_data], time_series, lstm_type)
         return pre_x, data
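
A quick sanity check of the new prediction windowing: get_predict_features2 slides a time_step*time_series window with a stride of time_step, so consecutive windows overlap. A minimal standalone sketch (toy data and column names are assumed, not taken from the repo):

    import numpy as np
    import pandas as pd

    time_step, time_series = 16, 2                      # assumed Model["time_step"] and the lstm2 default
    feats = ["ws", "wd"]                                # hypothetical feature columns
    df = pd.DataFrame(np.random.rand(96, len(feats)), columns=feats)

    time_step_loc = time_step * time_series - 1
    # same comprehension as get_predict_features2: stride = time_step
    windows = np.array([df.loc[i:i + time_step_loc, feats].reset_index(drop=True)
                        for i in range(0, len(df) - time_step_loc, time_step)])
    print(windows.shape)                                # (5, 32, 2): five 32-step windows

With time_series=3, get_predict_features3 grows the window to 48 steps while the stride stays time_step*(time_series-2) = 16, mirroring the middle-window labels used for training.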

+ 2 - 1
models_processing/model_tf/tf_bilstm.py

@@ -38,6 +38,7 @@ class TSHandler(object):

     @staticmethod
     def get_keras_model(opt, time_series=3):
+        assert time_series >= 3
         loss = region_loss(opt)
         l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
         nwp_input = Input(shape=(opt.Model['time_step']*time_series, opt.Model['input_size']), name='nwp')
@@ -46,7 +47,7 @@ class TSHandler(object):
         con1_p = MaxPooling1D(pool_size=5, strides=1, padding='valid', data_format='channels_last')(con1)
         nwp_bi_lstm = Bidirectional(LSTM(units=opt.Model['hidden_size'], return_sequences=False, kernel_regularizer=l2_reg), merge_mode='concat')(con1_p)  # concatenates both directions by default (final dim = 2*hidden_size)

-        output = Dense(opt.Model['time_step']*time_series, name='cdq_output')(nwp_bi_lstm)
+        output = Dense(opt.Model['time_step']*(time_series-2), name='cdq_output')(nwp_bi_lstm)

         model = Model(nwp_input, output)
         adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
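
The smaller Dense head matches the new bilstm labels: get_timestep_features_bilstm keeps only the middle window of each time_step*time_series slice, so the model must emit time_step*(time_series-2) values, and the new assert rejects time_series < 3, where that size would be zero or negative. A small arithmetic check (values assumed):

    time_step, time_series = 16, 3
    time_step_loc = time_step * time_series - 1
    lo, hi = time_step, time_step_loc - time_step        # label slice for i = 0; .loc is inclusive
    assert hi - lo + 1 == time_step * (time_series - 2)  # 16 labels per 48-step input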

+ 5 - 3
models_processing/model_tf/tf_lstm.py

@@ -37,7 +37,7 @@ class TSHandler(object):
             self.logger.info("加载模型权重失败:{}".format(e.args))
             self.logger.info("加载模型权重失败:{}".format(e.args))
 
 
     @staticmethod
     @staticmethod
-    def get_keras_model(opt, time_series=1):
+    def get_keras_model(opt, time_series=1, lstm_type=1):
         loss = region_loss(opt)
         l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
         l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
@@ -46,8 +46,10 @@ class TSHandler(object):
         con1 = Conv1D(filters=64, kernel_size=5, strides=1, padding='valid', activation='relu', kernel_regularizer=l2_reg)(nwp_input)
         con1_p = MaxPooling1D(pool_size=5, strides=1, padding='valid', data_format='channels_last')(con1)
         nwp_lstm = LSTM(units=opt.Model['hidden_size'], return_sequences=False, kernel_regularizer=l2_reg)(con1_p)
-
-        output = Dense(opt.Model['time_step']*time_series, name='cdq_output')(nwp_lstm)
+        if lstm_type == 2:
+            output = Dense(opt.Model['time_step'], name='cdq_output')(nwp_lstm)
+        else:
+            output = Dense(opt.Model['time_step']*time_series, name='cdq_output')(nwp_lstm)

         model = Model(nwp_input, output)
         adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
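
For lstm_type == 2 the head emits exactly one time_step window, because get_timestep_features_lstm2 keeps only the trailing time_step rows of each slice as labels; every other lstm_type keeps the full time_step*time_series output. A matching check (values assumed):

    time_step, time_series = 16, 2
    time_step_loc = time_step * time_series - 1
    lo, hi = time_step_loc - time_step + 1, time_step_loc  # trailing label slice for i = 0
    assert hi - lo + 1 == time_step                        # Dense(time_step) for lstm_type == 2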

+ 2 - 1
models_processing/model_tf/tf_lstm2_pre.py

@@ -36,6 +36,7 @@ def update_config():
     request_args = request.values.to_dict()
     # features parameter rule: 1. if provided, parse it and override; 2. if absent, keep the original value
     request_args['features'] = request_args['features'].split(',') if 'features' in request_args else current_config['features']
+    request_args['time_series'] = int(request_args.get('time_series', 2))  # request values arrive as strings; coerce before the window arithmetic
     current_config.update(request_args)

     # store in the request context
@@ -62,7 +63,7 @@ def model_prediction_bp():
         ts.opt.cap = round(target_scaler.transform(np.array([[float(args['cap'])]]))[0, 0], 2)
         ts.get_model(args)
         dh.opt.features = json.loads(ts.model_params)['Model']['features'].split(',')
-        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler, time_series=2)
+        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler, time_series=args['time_series'], lstm_type=2)
         res = list(chain.from_iterable(target_scaler.inverse_transform(ts.predict(scaled_pre_x))))
         pre_data['farm_id'] = args.get('farm_id', 'null')
         if int(args.get('algorithm_test', 0)):
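
The int() coercion added in update_config matters because Flask's request.values yields strings: without it, time_step*time_series would silently repeat a string instead of multiplying. A minimal illustration (values assumed):

    time_step = 16
    raw = {'time_series': '2'}                  # what request.values.to_dict() yields
    print(time_step * int(raw['time_series']))  # 32, the intended window length
    print(time_step * raw['time_series'])       # '2222222222222222' -- string repetition, a silent bug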

+ 6 - 5
models_processing/model_tf/tf_lstm2_train.py

@@ -31,6 +31,7 @@ def update_config():
     request_args = request.values.to_dict()
     # features parameter rule: 1. if provided, parse it and override; 2. if absent, keep the original value
     request_args['features'] = request_args['features'].split(',') if 'features' in request_args else current_config['features']
+    request_args['time_series'] = int(request_args.get('time_series', 2))  # request values arrive as strings; coerce before the window arithmetic
     current_config.update(request_args)

     # store in the request context
@@ -52,24 +53,24 @@ def model_training_bp():
     try:
         # ------------ fetch data, preprocess training data ------------
         train_data = get_data_from_mongo(args)
-        train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, time_series=2)
+        train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, time_series=args['time_series'], lstm_type=2)
         ts.opt.cap = round(scaled_cap, 2)
         ts.opt.Model['input_size'] = len(dh.opt.features)
         # ------------ train the model, save the model ------------
         # 1. In incremental-training mode, load the pretrained model's feature parameters first, then preprocess the training data
         # 2. In normal mode, preprocess the training data first, then load the model according to those features
-        model = ts.train_init() if ts.opt.Model['add_train'] else ts.get_keras_model(ts.opt, time_series=2)
+        model = ts.train_init() if ts.opt.Model['add_train'] else ts.get_keras_model(ts.opt, time_series=args['time_series'], lstm_type=2)
         if ts.opt.Model['add_train']:
             if model:
                 feas = json.loads(ts.model_params)['features']
                 if set(feas).issubset(set(dh.opt.features)):
                     dh.opt.features = list(feas)
-                    train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, time_series=2)
+                    train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, time_series=args['time_series'], lstm_type=2)
                 else:
-                    model = ts.get_keras_model(ts.opt, time_series=2)
+                    model = ts.get_keras_model(ts.opt, time_series=args['time_series'], lstm_type=2)
                     logger.info("训练数据特征,不满足,加强训练模型特征")
                     logger.info("训练数据特征,不满足,加强训练模型特征")
             else:
-                model = ts.get_keras_model(ts.opt, time_series=2)
+                model = ts.get_keras_model(ts.opt, time_series=args['time_series'], lstm_type=2)
         ts_model = ts.training(model, [train_x, train_y, valid_x, valid_y])
         args['Model']['features'] = ','.join(dh.opt.features)
         args['params'] = json.dumps(args)
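
A standalone shape check of the lstm2 head built here: the input spans time_step*time_series steps, the output one time_step window. The sketch mirrors get_keras_model's layer stack with assumed opt values, not the repo's config:

    from tensorflow.keras import Input, Model, layers

    time_step, time_series, n_feat, hidden = 16, 2, 10, 32    # assumed opt.Model values
    inp = Input(shape=(time_step * time_series, n_feat), name='nwp')
    x = layers.Conv1D(64, 5, activation='relu')(inp)
    x = layers.MaxPooling1D(pool_size=5, strides=1)(x)
    x = layers.LSTM(hidden)(x)
    out = layers.Dense(time_step, name='cdq_output')(x)       # the lstm_type == 2 head
    print(Model(inp, out).output_shape)                       # (None, 16)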

+ 2 - 1
models_processing/model_tf/tf_lstm3_pre.py

@@ -35,6 +35,7 @@ def update_config():
     request_args = request.values.to_dict()
     # features parameter rule: 1. if provided, parse it and override; 2. if absent, keep the original value
     request_args['features'] = request_args['features'].split(',') if 'features' in request_args else current_config['features']
+    request_args['time_series'] = int(request_args.get('time_series', 3))  # request values arrive as strings; coerce before the window arithmetic
     current_config.update(request_args)

     # store in the request context
@@ -61,7 +62,7 @@ def model_prediction_bp():
         ts.opt.cap = round(target_scaler.transform(np.array([[float(args['cap'])]]))[0, 0], 2)
         ts.get_model(args)
         dh.opt.features = json.loads(ts.model_params)['Model']['features'].split(',')
-        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler, time_series=3)
+        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler, time_series=args['time_series'], lstm_type=3)
         res = list(chain.from_iterable(target_scaler.inverse_transform(ts.predict(scaled_pre_x))))
         pre_data['farm_id'] = args.get('farm_id', 'null')
         if int(args.get('algorithm_test', 0)):

+ 6 - 5
models_processing/model_tf/tf_lstm3_train.py

@@ -31,6 +31,7 @@ def update_config():
     request_args = request.values.to_dict()
     # features parameter rule: 1. if provided, parse it and override; 2. if absent, keep the original value
     request_args['features'] = request_args['features'].split(',') if 'features' in request_args else current_config['features']
+    request_args['time_series'] = int(request_args.get('time_series', 3))  # request values arrive as strings; coerce before the window arithmetic
     current_config.update(request_args)

     # store in the request context
@@ -52,24 +53,24 @@ def model_training_bp():
     try:
         # ------------ fetch data, preprocess training data ------------
         train_data = get_data_from_mongo(args)
-        train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, time_series=3)
+        train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, time_series=args['time_series'], lstm_type=3)
         ts.opt.cap = round(scaled_cap, 2)
         ts.opt.Model['input_size'] = len(dh.opt.features)
         # ------------ train the model, save the model ------------
         # 1. In incremental-training mode, load the pretrained model's feature parameters first, then preprocess the training data
         # 2. In normal mode, preprocess the training data first, then load the model according to those features
-        model = ts.train_init() if ts.opt.Model['add_train'] else ts.get_keras_model(ts.opt)
+        model = ts.train_init() if ts.opt.Model['add_train'] else ts.get_keras_model(ts.opt, time_series=args['time_series'])
         if ts.opt.Model['add_train']:
             if model:
                 feas = json.loads(ts.model_params)['features']
                 if set(feas).issubset(set(dh.opt.features)):
                     dh.opt.features = list(feas)
-                    train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, time_series=3)
+                    train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, time_series=args['time_series'], lstm_type=3)
                 else:
-                    model = ts.get_keras_model(ts.opt)
+                    model = ts.get_keras_model(ts.opt, time_series=args['time_series'])
                     logger.info("训练数据特征,不满足,加强训练模型特征")
                     logger.info("训练数据特征,不满足,加强训练模型特征")
             else:
-                model = ts.get_keras_model(ts.opt)
+                model = ts.get_keras_model(ts.opt, time_series=args['time_series'])
         ts_model = ts.training(model, [train_x, train_y, valid_x, valid_y])
         args['Model']['features'] = ','.join(dh.opt.features)
         args['params'] = json.dumps(args)

+ 1 - 1
models_processing/model_tf/tf_lstm_pre.py

@@ -63,7 +63,7 @@ def model_prediction_bp():
         ts.opt.cap = round(target_scaler.transform(np.array([[float(args['cap'])]]))[0, 0], 2)
         ts.get_model(args)
         dh.opt.features = json.loads(ts.model_params)['Model']['features'].split(',')
-        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler, time_series=args['time_series'])
+        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler, time_series=args['time_series'], lstm_type=1)
         res = list(chain.from_iterable(target_scaler.inverse_transform(ts.predict(scaled_pre_x))))
         pre_data['farm_id'] = args.get('farm_id', 'null')
         if int(args.get('algorithm_test', 0)):

+ 3 - 3
models_processing/model_tf/tf_lstm_train.py

@@ -59,7 +59,7 @@ def model_training_bp():
         # ------------ train the model, save the model ------------
         # 1. In incremental-training mode, load the pretrained model's feature parameters first, then preprocess the training data
         # 2. In normal mode, preprocess the training data first, then load the model according to those features
-        model = ts.train_init() if ts.opt.Model['add_train'] else ts.get_keras_model(ts.opt, time_series=args['time_series'])
+        model = ts.train_init() if ts.opt.Model['add_train'] else ts.get_keras_model(ts.opt, time_series=args['time_series'], lstm_type=1)
         if ts.opt.Model['add_train']:
             if model:
                 feas = json.loads(ts.model_params)['features']
@@ -67,10 +67,10 @@ def model_training_bp():
                     dh.opt.features = list(feas)
                     train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, time_series=args['time_series'])
                 else:
-                    model = ts.get_keras_model(ts.opt, time_series=args['time_series'])
+                    model = ts.get_keras_model(ts.opt, time_series=args['time_series'], lstm_type=1)
                     logger.info("训练数据特征,不满足,加强训练模型特征")
                     logger.info("训练数据特征,不满足,加强训练模型特征")
             else:
-                model = ts.get_keras_model(ts.opt, time_series=args['time_series'])
+                model = ts.get_keras_model(ts.opt, time_series=args['time_series'], lstm_type=1)
         ts_model = ts.training(model, [train_x, train_y, valid_x, valid_y])
         args['Model']['features'] = ','.join(dh.opt.features)
         args['params'] = json.dumps(args)