The network architecture is as follows: the dataset features (input_expansion) are expanded using Chebyshev polynomials, which gives the expansion layer (Exp_layer). I then split the dataset into train and test sets and applied backpropagation between Exp_layer and the output layer (a single node) using Keras.

When I evaluate the model, the accuracy fluctuates. Please suggest how I can increase the accuracy. The dataset used is JM1 from the PROMISE repository.
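To make the expansion concrete: each feature value x is replaced by the first five Chebyshev polynomials of the first kind, T0(x) to T4(x), using the same formulas as in funexp() below. A minimal sketch with a made-up feature value:

```
import numpy as np

x = 0.5  # a hypothetical feature value, just for illustration
# T0..T4, the same formulas used in funexp() below
expansion = np.array([1.0, x, 2*x**2 - 1, 4*x**3 - 3*x, 8*x**4 - 8*x**2 + 1])
print(expansion)  # [ 1.   0.5 -0.5 -1.  -0.5]
```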
```
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
from keras.optimizers import SGD, Adam
from keras import backend as kb
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier      # model used for feature selection (not used below)
from sklearn.feature_selection import SelectFromModel  # for feature selection (not used below)
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
data = pd.read_excel('jm1_csv.xls')
dataset = data.dropna(how='any')                 # drop rows with missing values
input_expansion = dataset.iloc[:, :-1].values    # features
Y = dataset.iloc[:, -1].values                   # target label
rows = dataset.shape[0]
columns = input_expansion.shape[1]
print(columns)
def funexp(x):
    # Chebyshev expansion of one sample: each feature i is mapped to T0(i)..T4(i)
    a = np.array([])
    for i in x:
        t0x = 1
        t1x = i
        t2x = 2*(i**2) - 1
        t3x = 4*(i**3) - 3*i
        t4x = 8*(i**4) - 8*(i**2) + 1
        a = np.append(a, np.array([t0x, t1x, t2x, t3x, t4x]))
    return a
Exp_layer = np.array([])
for i in input_expansion:
    Exp_layer = np.append(Exp_layer, funexp(i))
Exp_layer = Exp_layer.reshape(len(input_expansion), columns*5)
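# Side note: the same expansion can be built without Python-level loops.
# This is just a vectorized sketch assuming the same T0..T4 formulas as funexp above;
# Exp_layer_vec is a hypothetical name and is not used further below.
X = input_expansion
Exp_layer_vec = np.stack(
    [np.ones_like(X), X, 2*X**2 - 1, 4*X**3 - 3*X, 8*X**4 - 8*X**2 + 1],
    axis=2).reshape(len(X), columns*5)  # same (rows, columns*5) layout as Exp_layer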
X_train, X_test, Y_train, Y_test = train_test_split(Exp_layer, Y, test_size=0.2)
keras_model = Sequential()
keras_model.add(Dense(1, input_dim=columns*5, activation='relu'))  # single output node on top of Exp_layer
optimizer = SGD(0.001)
keras_model.compile(loss='mean_absolute_error', optimizer=optimizer, metrics=['accuracy'])
keras_model.fit(X_train, Y_train, epochs=20, batch_size=50)
results = keras_model.evaluate(X_test, Y_test, batch_size=50)
print('Loss:', results[0])
print('Accuracy of FLANN:', results[1])
```