import os

import numpy as np
import tensorflow as tf
from tensorflow.keras.models import model_from_json

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing

top_words = 5000

df = pd.read_csv('/Users/surenthiran/Downloads/Study PhD/FINAL PROJECT VIVA 2020/SurenJULY2020/Final_cleveland11.csv')
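# encode the target (last column) into integer class labels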
label_encoder = preprocessing.LabelEncoder()
df.iloc[:, -1] = label_encoder.fit_transform(df.iloc[:, -1])
X_ = df.iloc[:, 0:13]
Y_ = np.asarray(df.iloc[:, -1])
#
#
X = df.iloc[35648:48449, 0:13]
y = df.iloc[35648:48449, -1]
# split the selected rows into train and test sets (35% held out)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.35)

x_train, x_test = np.asarray(x_train), np.asarray(x_test)
print(x_train.shape[1])
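# reshape each sample from a flat 13-feature vector to (13, 1, 1) so it can be
# consumed as a 13-step sequence by the recurrent layers below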
xtrain1 = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1, 1))
xtest1 = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1, 1))

input_shape = (13, 1, 1)

rnn_size = 13
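# the 13 input features are treated as a 13-step sequence (1 value per step);
# the functional model below runs forward and backward LSTM branches over it
# and merges them into the first regression head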

input = tf.keras.Input(shape=input_shape)
x = input

convolution_shape = x.get_shape()
x = tf.keras.layers.Reshape(target_shape=(int(convolution_shape[1]), int(convolution_shape[2] * convolution_shape[3])))(
    x)
lstm_3 = tf.keras.layers.LSTM(rnn_size, return_sequences=True, name='lstm1')(x)
lstm_3b = tf.keras.layers.LSTM(rnn_size, return_sequences=True, go_backwards=True, name='lstm1_back')(x)
lstm3_merged = tf.keras.layers.Add(name='layer_merged_face_value')([lstm_3, lstm_3b])
lstm_4 = tf.keras.layers.LSTM(rnn_size, return_sequences=True, name='lstm2')(lstm3_merged)
lstm_4b = tf.keras.layers.LSTM(rnn_size, return_sequences=True, go_backwards=True, name='lstm2_back')(lstm3_merged)
x = tf.keras.layers.Concatenate(axis=1, name='DYNN1')([lstm_4, lstm_4b])
x = tf.keras.layers.Dropout(0.25)(x)
x = tf.keras.layers.Flatten()(x)
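# output 1: a single linear unit on the merged, flattened LSTM features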
x1 = tf.keras.layers.Dense(1, activation='linear', name='Output_face_value')(x)

################  Model 2: cascaded on Model 1's prediction  ########################

convolution_shape2 = input.get_shape()
tensor_values1 = (int(convolution_shape2[1] * convolution_shape2[2] * convolution_shape2[3]),)

feeding_second = tf.keras.layers.Reshape(target_shape=(tensor_values1))(input)

Input_varible_sec = tf.keras.layers.Concatenate(axis=-1, name='Din')([feeding_second, x1])
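# Input_varible_sec now carries the 13 original features plus sub-model 1's prediction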

convolution_shape3 = Input_varible_sec.get_shape()

Reshape_second_tensor = tf.keras.layers.Reshape(target_shape=(1, convolution_shape3[1]))(Input_varible_sec)
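# Reshape_second_tensor: the concatenated vector as a single-timestep sequence
# for the bidirectional GRU stack below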

##########################################################################################################
gru_5 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(rnn_size, return_sequences=True, name='gru5'))(
    Reshape_second_tensor)
gru_5a = tf.keras.layers.Bidirectional(
    tf.keras.layers.GRU(rnn_size, return_sequences=True, go_backwards=True, name='gru_5a'))(Reshape_second_tensor)
gru5_merged = tf.keras.layers.Add()([gru_5, gru_5a])

gru_14 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(rnn_size, return_sequences=True, name='gru14'))(gru5_merged)
gru_14b = tf.keras.layers.Bidirectional(
    tf.keras.layers.GRU(rnn_size, return_sequences=True, go_backwards=True, name='gru14a'))(gru5_merged)
x14 = tf.keras.layers.Concatenate(axis=1, name='DYNN2')([gru_14, gru_14b])

x4 = tf.keras.layers.Dropout(0.5)(x14)

x4 = tf.keras.layers.Flatten()(x4)
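# output 2: the second linear head of the cascade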
x5 = tf.keras.layers.Dense(1, activation='linear', name='out_put_x_factor')(x4)

####################################################################

Input_varible_third = tf.keras.layers.Concatenate(axis=-1, name='Din3')([Input_varible_sec, x5])

convolution_shape4 = Input_varible_third.get_shape()

Reshape_third_tensor = tf.keras.layers.Reshape(target_shape=(1, convolution_shape4[1]))(Input_varible_third)
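# Reshape_third_tensor: sub-model 2's input plus its prediction x5, again as a
# single-timestep sequence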

########################################################

gru_6 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(rnn_size, return_sequences=True, name='gru6'))(
    Reshape_third_tensor)
gru_6a = tf.keras.layers.Bidirectional(
    tf.keras.layers.GRU(rnn_size, return_sequences=True, go_backwards=True, name='gru_6a'))(Reshape_third_tensor)
gru6_merged = tf.keras.layers.Add()([gru_6, gru_6a])

gru_16 = tf.keras.layers.GRU(rnn_size, return_sequences=True, name='gru16')(gru6_merged)
gru_16b = tf.keras.layers.GRU(rnn_size, return_sequences=True, go_backwards=True, name='gru16a')(gru6_merged)
x16 = tf.keras.layers.Concatenate(axis=1, name='DYNN3')([gru_16, gru_16b])

# the two branches below mirror the one above but are not connected to any model
# output, so they do not affect training; their layer names are made unique
gru_17 = tf.keras.layers.GRU(rnn_size, return_sequences=True, name='gru17')(gru6_merged)
gru_17b = tf.keras.layers.GRU(rnn_size, return_sequences=True, go_backwards=True, name='gru17a')(gru6_merged)
x17 = tf.keras.layers.Concatenate(axis=1, name='DYNN4')([gru_17, gru_17b])

gru_18 = tf.keras.layers.GRU(rnn_size, return_sequences=True, name='gru18')(gru6_merged)
gru_18b = tf.keras.layers.GRU(rnn_size, return_sequences=True, go_backwards=True, name='gru18a')(gru6_merged)
x18 = tf.keras.layers.Concatenate(axis=1, name='DYNN5')([gru_18, gru_18b])

x6 = tf.keras.layers.Dropout(0.5)(x16)

x6 = tf.keras.layers.Flatten()(x6)
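# output 3: the final linear head of the cascade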
x7 = tf.keras.layers.Dense(1, activation='linear', name='output_Buy_price')(x6)

###############################################################################################################################

model = tf.keras.Model(inputs=input, outputs=[x1, x5, x7])
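# optional: print the layer graph of the three-output cascade (diagnostic only)
model.summary()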

adam = tf.keras.optimizers.Adam(learning_rate=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

model.compile(loss='mean_squared_error',
              optimizer=adam,
              metrics={'Output_face_value': 'mse', 'out_put_x_factor': 'mse', 'output_Buy_price': 'mse'}
              )

learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                               patience=3,
                                                               verbose=1, factor=0.5,
                                                               min_lr=0.00001)
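# halves the learning rate whenever val_loss has not improved for 3 epochs;
# attached to training through the callbacks argument of fit() below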

model.fit(
    xtrain1, [y_train, y_train, y_train],
    batch_size=150, verbose=1, epochs=10,
    validation_data=(xtest1, [y_test, y_test, y_test]),
    callbacks=[learning_rate_reduction]
)

json_file = open('/Users/surenthiran/Downloads/Study PhD/FINAL PROJECT VIVA 2020/SurenJULY2020/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)

loaded_model.load_weights("/Users/surenthiran/Downloads/Study PhD/FINAL PROJECT VIVA 2020/SurenJULY2020/same212.h5")

print("Loaded model from disk")

loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1] * 100))

from tensorflow.keras import backend as K


def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall


def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision


def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
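# F1 is the harmonic mean of precision and recall; K.epsilon() guards against
# division by zero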


loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', f1_m, precision_m, recall_m])
loss, score, f1_score, precision, recall = loaded_model.evaluate(x_test, y_test, verbose=0)
print("%s: %.4f%%" % (loaded_model.metrics_names[1], score * 100))
print("%s: %.4f" % (loaded_model.metrics_names[0], loss))
print("%s: %.4f" % (loaded_model.metrics_names[2], f1_score))
print("%s: %.4f" % (loaded_model.metrics_names[3], precision))
print("%s: %.4f" % (loaded_model.metrics_names[4], recall))
############################################ Predict output ######################################################
# predict_classes() was removed from recent Keras releases; thresholding the
# predicted probabilities at 0.5 assumes a single sigmoid/binary output
result = (loaded_model.predict(x_test) > 0.5).astype("int32")
print(result)
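# optional sanity check, assuming a binary 0/1 target: confusion matrix of the
# thresholded predictions against the held-out labels
from sklearn.metrics import confusion_matrix
print(confusion_matrix(np.asarray(y_test), result.ravel()))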
