Junior Member
Joined: Sep 2017
Posts: 82
|
How about this:

// Optimization parameters - set these as default values
var learning_rate = 0.001; // RMSprop learning rate; re-set by optimize() in neural()
var num_neurons = 50;      // units in the hidden Dense layer
var dropout_rate = 0.3;    // dropout fraction applied after the hidden layer
var epochs = 100;          // training epochs per model
var batch_size = 32;       // mini-batch size for training
// Function to handle neural network operations
// Neural-network bridge: dispatches Zorro's NEURAL_* requests to a Python
// script with the same file name as the strategy.
// Status     - NEURAL_INIT / NEURAL_TRAIN / NEURAL_PREDICT / NEURAL_SAVE / NEURAL_LOAD
// model      - 0-based number of the current model (per asset/algo combination)
// NumSignals - number of input signals per sample
// Data       - training batch, signal vector, or file name, depending on Status
var neural(int Status, int model, int NumSignals, void* Data)
{
if(!wait(0)) return 0;
// Hyperparameters: during parameter training optimize() steps through the
// given range; otherwise it returns the stored optimal value.
learning_rate = optimize(0.001, 0.0001, 0.01, 0.0001);
num_neurons = optimize(50, 10, 100, 10);
dropout_rate = optimize(0.3, 0.1, 0.5, 0.1);
epochs = optimize(100, 50, 200, 50);
batch_size = optimize(32, 16, 64, 16);
// start the Python session and let the script set up its globals
if(Status == NEURAL_INIT) {
if(!pyStart(strf("%s.py",Script),1)) return 0;
pyX("neural_init()");
return 1;
}
// export the batch of training samples and call the Python training function
if(Status == NEURAL_TRAIN) {
string name = strf("Data\\signals%i.csv",Core);
file_write(name,Data,0);
pyX(strf("XY = pandas.read_csv('%s%s',header = None)",slash(ZorroFolder),slash(name)));
pySet("AlgoVar",AlgoVar,8);
// pass the current hyperparameter values to Python (as doubles)
pySet("learning_rate", &learning_rate, 1);
pySet("num_neurons", &num_neurons, 1);
pySet("dropout_rate", &dropout_rate, 1);
pySet("epochs", &epochs, 1);
pySet("batch_size", &batch_size, 1);
// model is 0-based on both sides; the Python `models` list is indexed
// from 0, so pass it unchanged. (The former "+1" offset stems from the
// 1-based R example and made neural_predict index past the list end.)
if(!pyX(strf("neural_train(%i,XY,learning_rate,num_neurons,dropout_rate,epochs,batch_size)", model)))
return 0;
return 1;
}
// predict the target with the Python predict function
if(Status == NEURAL_PREDICT) {
pySet("AlgoVar",AlgoVar,8);
pySet("X",(double*)Data,NumSignals);
pyX(strf("Y = neural_predict(%i,X)", model));
return pyVar("Y[0]");
}
// save all trained models
if(Status == NEURAL_SAVE) {
print(TO_ANY,"\nStore %s", strrchr(Data,'\\')+1);
return pyX(strf("neural_save('%s')", slash(Data)));
}
// load all trained models
if(Status == NEURAL_LOAD) {
// NOTE(review): uses printf while NEURAL_SAVE uses print(TO_ANY,...);
// both log the message - kept as in the original
printf("\nLoad %s", strrchr(Data,'\\')+1);
return pyX(strf("neural_load('%s')", slash(Data)));
}
return 1;
}

And the new Python code:

import numpy as np
from tensorflow import keras
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import RMSprop
# Global registry of trained Keras models, appended in training order;
# neural_predict/neural_save/neural_load index into this list.
models = []
def neural_train(model, XY, learning_rate, num_neurons, dropout_rate, epochs, batch_size):
    """Build and train one binary classifier and append it to the global ``models`` list.

    model: model number from the caller (informational only; networks are
        stored in call order, so models must be trained sequentially).
    XY: DataFrame whose last column is the target; the rest are features.
    learning_rate, dropout_rate: floats forwarded to RMSprop / Dropout.
    num_neurons, epochs, batch_size: counts — they arrive as doubles from
        the lite-C bridge (pySet passes var/double), so cast them to int.
    """
    X = np.array(XY.iloc[:, :-1])
    # Binarize the target: positive value -> class 1, otherwise class 0.
    Y = XY.iloc[:, -1].apply(lambda t: 1 if t > 0 else 0)
    # Integer hyperparameters come across the bridge as floats.
    num_neurons = int(num_neurons)
    epochs = int(epochs)
    batch_size = int(batch_size)
    # Use a separate name for the network; the original shadowed `model`.
    net = keras.models.Sequential()
    net.add(Dense(num_neurons, activation='relu', input_shape=(X.shape[1],)))
    net.add(Dropout(dropout_rate))
    net.add(Dense(1, activation='sigmoid'))
    net.compile(loss='binary_crossentropy',
                optimizer=RMSprop(learning_rate=learning_rate),
                metrics=['accuracy'])
    # No shuffling or validation split: keep the time order of the samples.
    net.fit(X, Y, epochs=epochs, batch_size=batch_size, validation_split=0, shuffle=False)
    models.append(net)
def neural_predict(model, X):
    """Return hard class labels (0/1) for samples ``X`` using ``models[model]``.

    X may be a flat list / 1-D array (one sample) or a 2-D array of samples.
    A single sample is reshaped to (1, n_features) as Keras expects a batch.

    Bug fixed: the original converted a list to a 1-D ndarray in the ``if``
    branch but then skipped the ``elif`` reshape, so single-sample lists were
    never reshaped and predict() failed.
    """
    X = np.asarray(X)
    if X.ndim == 1:
        # One sample: Keras predict expects a (batch, features) matrix.
        X = X.reshape(1, -1)
    Y = models[model].predict(X)
    # Flatten the (n, 1) sigmoid output and threshold at 0.5.
    return [1 if y > 0.5 else 0 for y in np.ravel(Y)]
def neural_save(name):
    """Persist all models: architectures as JSON lines in ``name``, trained
    weights in a companion NumPy archive ``name + '.npz'``.

    Bug fixed: ``to_json()`` serializes only the architecture, never the
    trained weights, so the original save/load round-trip produced untrained
    models.
    """
    with open(name, 'w') as file:
        for model in models:
            file.write(model.to_json() + '\n')
    # Collect every layer weight of every model under a unique key.
    weights = {}
    for i, model in enumerate(models):
        for j, w in enumerate(model.get_weights()):
            weights['m%d_w%d' % (i, j)] = w
    np.savez(name + '.npz', **weights)

def neural_load(name):
    """Rebuild the global ``models`` list from a file written by neural_save().

    Restores trained weights from ``name + '.npz'`` when present; files from
    older versions (architecture only) still load, but remain untrained.
    """
    global models
    models = []
    try:
        archive = np.load(name + '.npz')
    except (IOError, OSError):
        archive = None  # legacy save file without a weights archive
    with open(name, 'r') as file:
        lines = [line for line in file if line.strip()]
    for i, line in enumerate(lines):
        model = keras.models.model_from_json(line.strip())
        if archive is not None:
            restored = []
            j = 0
            while 'm%d_w%d' % (i, j) in archive:
                restored.append(archive['m%d_w%d' % (i, j)])
                j += 1
            if restored:
                model.set_weights(restored)
        models.append(model)
def neural_init():
    """Prepare a fresh session: empty the model registry and seed NumPy."""
    global models
    models = []
    # Fixed seed so repeated training runs are reproducible.
    np.random.seed(365)
# Example usage:
# neural_init()
# neural_train(0, XY, 0.001, 50, 0.3, 100, 32)
# result = neural_predict(0, X)
# neural_save('models.txt')
# neural_load('models.txt')
Last edited by TipmyPip; 12/01/23 15:10.
|