import sklearn # do this first, otherwise get a libgomp error?!
import argparse, os, sys, random, logging
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer, Dropout, Conv1D, Conv2D, Flatten, Reshape, MaxPooling1D, MaxPooling2D, BatchNormalization, TimeDistributed
from tensorflow.keras.optimizers import Adam
from conversion import convert_to_tf_lite, save_saved_model

# Lower TensorFlow log levels
tf.get_logger().setLevel(logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Set random seeds for repeatable results
RANDOM_SEED = 3
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
tf.random.set_seed(RANDOM_SEED)

# Load files
parser = argparse.ArgumentParser(description='Running custom Keras models in Edge Impulse')
parser.add_argument('--data-directory', type=str, required=True)
parser.add_argument('--epochs', type=int, required=True)
parser.add_argument('--learning-rate', type=float, required=True)
parser.add_argument('--out-directory', type=str, required=True)

args, unknown = parser.parse_known_args()

if not os.path.exists(args.out_directory):
    os.mkdir(args.out_directory)

# grab train/test set and convert into TF Dataset
X_train = np.load(os.path.join(args.data_directory, 'X_split_train.npy'), mmap_mode='r')
Y_train = np.load(os.path.join(args.data_directory, 'Y_split_train.npy'))
X_test = np.load(os.path.join(args.data_directory, 'X_split_test.npy'), mmap_mode='r')
Y_test = np.load(os.path.join(args.data_directory, 'Y_split_test.npy'))

classes = Y_train.shape[1]
MODEL_INPUT_SHAPE = X_train.shape[1:]

train_dataset = tf.data.Dataset.from_tensor_slices((X_train, Y_train))
validation_dataset = tf.data.Dataset.from_tensor_slices((X_test, Y_test))

# place to put callbacks (e.g. to MLFlow or Weights & Biases)
callbacks = []

# model architecture
model = Sequential()
model.add(Dense(20, activation='relu',
    activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(10, activation='relu',
    activity_regularizer=tf.keras.regularizers.l1(0.00001)))
model.add(Dense(classes, activation='softmax', name='y_pred'))

# this controls the learning rate
opt = Adam(learning_rate=args.learning_rate, beta_1=0.9, beta_2=0.999)

# this controls the batch size, or you can manipulate the tf.data.Dataset objects yourself
BATCH_SIZE = 32
train_dataset_batch = train_dataset.batch(BATCH_SIZE, drop_remainder=False)
validation_dataset_batch = validation_dataset.batch(BATCH_SIZE, drop_remainder=False)

# train the neural network
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.fit(train_dataset_batch, epochs=args.epochs,
    validation_data=validation_dataset_batch, verbose=2, callbacks=callbacks)

print('')
print('Training network OK')
print('')

# Use this flag to disable per-channel quantization for a model.
# This can reduce RAM usage for convolutional models, but may have
# an impact on accuracy.
disable_per_channel_quantization = False

# Save the model to disk
save_saved_model(model, args.out_directory)

# Create tflite files (f32 / i8)
convert_to_tf_lite(model, args.out_directory, validation_dataset, MODEL_INPUT_SHAPE,
    'model.tflite', 'model_quantized_int8_io.tflite', disable_per_channel_quantization)
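
# A minimal sketch of how this script might be invoked. The flag names come from
# the argparse setup above; the directory paths and hyperparameter values are
# illustrative only (when run as an Edge Impulse custom learning block, the
# platform supplies these arguments and mounts the data directory for you):
#
#   python3 train.py \
#       --data-directory data/ \
#       --epochs 30 \
#       --learning-rate 0.001 \
#       --out-directory out/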