David 2 ماه پیش
والد
کامیت
71fdf7beef
3 فایل تغییر یافته به همراه 43 افزوده شده و 38 حذف شده
  1. 4 4
      models_processing/model_koi/tf_bp_train.py
  2. 2 2
      models_processing/model_koi/tf_cnn.py
  3. 37 32
      models_processing/model_koi/tf_cnn_train.py

+ 4 - 4
models_processing/model_koi/tf_bp_train.py

@@ -48,10 +48,10 @@ def model_training_bp():
     # ------------ 训练模型,保存模型 ------------
     opt.Model['input_size'] = train_x.shape[1]
     bp_model = bp.training(opt, [train_x, train_y, valid_x, valid_y])
-    args_dict['params'] = json.dumps(args)
-    args_dict['descr'] = '测试'
-    args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-    insert_trained_model_into_mongo(bp_model, args_dict)
+    args['params'] = json.dumps(args)
+    args['descr'] = '测试'
+    args['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+    insert_trained_model_into_mongo(bp_model, args)
     insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args)
     success = 1
     # except Exception as e:

+ 2 - 2
models_processing/model_koi/tf_cnn.py

@@ -46,7 +46,7 @@ class CNNHandler(object):
 
         output = Dense(1, name='d5')(nwp)
         output_f = Flatten()(output)
-        model = Model(nwp_input, output_f)
+        model = Model(inputs=nwp_input, outputs=output_f)
         adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
         reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.01, patience=5, verbose=1)
         model.compile(loss=rmse, optimizer=adam)
@@ -73,7 +73,7 @@ class CNNHandler(object):
         print("++++++++++", np.array(train_x[1]).shape)
         model.summary()
         early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')
-        history = model.fit(train_x, train_y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2,  validation_data=(valid_x, valid_y), callbacks=[early_stop], shuffle=False)
+        history = model.fit(train_x, train_y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2, validation_data=(valid_x, valid_y), callbacks=[early_stop], shuffle=False)
         loss = np.round(history.history['loss'], decimals=5)
         val_loss = np.round(history.history['val_loss'], decimals=5)
         self.logger.info("-----模型训练经过{}轮迭代-----".format(len(loss)))

+ 37 - 32
models_processing/model_koi/tf_cnn_train.py

@@ -23,12 +23,12 @@ app = Flask('tf_cnn_train——service')
 
 with app.app_context():
     with open('../model_koi/cnn.yaml', 'r', encoding='utf-8') as f:
-        arguments = yaml.safe_load(f)
+        args = yaml.safe_load(f)
 
-    dh = DataHandler(logger, arguments)
+    dh = DataHandler(logger, args)
     cnn = CNNHandler(logger)
 
-@app.route('/nn_bp_training', methods=['POST'])
+@app.route('/nn_cnn_training', methods=['POST'])
 def model_training_bp():
     # 获取程序开始时间
     start_time = time.time()
@@ -36,23 +36,28 @@ def model_training_bp():
     success = 0
     print("Program starts execution!")
     args_dict = request.values.to_dict()
-    args = arguments.deepcopy()
+    args_dict['features'] = args_dict['features'].split(',')
     args.update(args_dict)
+    opt = argparse.Namespace(**args)
+    logger.info(args_dict)
     try:
-        opt = argparse.Namespace(**args)
-        logger.info(args_dict)
         train_data = get_data_from_mongo(args_dict)
         train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data, opt)
+        opt.Model['input_size'] = train_x.shape[2]
         bp_model = cnn.training(opt, [train_x, valid_x, train_y, valid_y])
-        args_dict['params'] = json.dumps(args)
-        args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-        insert_trained_model_into_mongo(bp_model, args_dict)
+
+        args['params'] = json.dumps(args)
+        args['descr'] = '测试'
+        args['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+
+        insert_trained_model_into_mongo(bp_model, args)
         insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args)
         success = 1
     except Exception as e:
-        my_exception = traceback.format_exc()
-        my_exception.replace("\n", "\t")
-        result['msg'] = my_exception
+        # my_exception = traceback.format_exc()
+        # my_exception.replace("\n", "\t")
+        # result['msg'] = my_exception
+        print("???", e)
     end_time = time.time()
 
     result['success'] = success
@@ -69,23 +74,23 @@ if __name__ == "__main__":
     logger = logging.getLogger("model_training_bp log")
     from waitress import serve
 
-    # serve(app, host="0.0.0.0", port=10103, threads=4)
-    print("server start!")
-    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
-    'model_table': 'j00083_model', 'mongodb_read_table': 'j00083', 'col_time': 'dateTime',
-    'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
-    args_dict['features'] = args_dict['features'].split(',')
-    arguments.update(args_dict)
-    dh = DataHandler(logger, arguments)
-    cnn = CNNHandler(logger)
-    opt = argparse.Namespace(**arguments)
-    opt.Model['input_size'] = len(opt.features)
-    train_data = get_data_from_mongo(args_dict)
-    train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data, opt)
-    cnn_model = cnn.training(opt, [train_x, train_y, valid_x, valid_y])
-
-    args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-    args_dict['params'] = arguments
-    args_dict['descr'] = '测试'
-    insert_trained_model_into_mongo(cnn_model, args_dict)
-    insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args_dict)
+    serve(app, host="0.0.0.0", port=10103, threads=4)
+    # print("server start!")
+    # args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
+    # 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083', 'col_time': 'dateTime',
+    # 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
+    # args_dict['features'] = args_dict['features'].split(',')
+    # arguments.update(args_dict)
+    # dh = DataHandler(logger, arguments)
+    # cnn = CNNHandler(logger)
+    # opt = argparse.Namespace(**arguments)
+    # opt.Model['input_size'] = len(opt.features)
+    # train_data = get_data_from_mongo(args_dict)
+    # train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data, opt)
+    # cnn_model = cnn.training(opt, [train_x, train_y, valid_x, valid_y])
+    #
+    # args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+    # args_dict['params'] = arguments
+    # args_dict['descr'] = '测试'
+    # insert_trained_model_into_mongo(cnn_model, args_dict)
+    # insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args_dict)