def get_classifier_class(class_name):
name_table = {
'svm': SVC,
'k_neighbors': KNeighborsClassifier,
'gaussian_process': GaussianProcessClassifier,
'decision_tree': DecisionTreeClassifier,
'random_forest': RandomForestClassifier,
'ada_boost': AdaBoostClassifier,
'mlp': MLPClassifier,
'gaussian_naive_bayes': GaussianNB,
'quadratic_discriminant_analysis': QuadraticDiscriminantAnalysis
}
if class_name not in name_table:
raise ValueError('No such classifier')
return name_table[class_name]
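A minimal usage sketch, assuming the scikit-learn imports the table requires (the original module presumably has them at top level):

from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

# Look up a class by its short name, then instantiate it as usual.
clf_class = get_classifier_class('mlp')            # returns MLPClassifier
clf = clf_class(hidden_layer_sizes=(50,), max_iter=500)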
Python MLPClassifier() example source code
Source file: classify.py
Project: oss-github-analysis-project
Author: itu-oss-project-team
def __create_classifiers(self):
classifiers = list()
classifiers.append({"func": linear_model.SGDClassifier(loss="log"),
"name": "sgd"})
classifiers.append({"func": neighbors.KNeighborsClassifier(1, weights='distance'),
"name": "knn1"})
classifiers.append({"func": neighbors.KNeighborsClassifier(3, weights='distance'),
"name": "knn3"})
classifiers.append({"func": neighbors.KNeighborsClassifier(5, weights='distance'),
"name": "knn5"})
classifiers.append({"func": GaussianNB(),
"name": "naive_bayes"})
# classifiers.append({"func": tree.DecisionTreeClassifier(), "name": "decision_tree"})
# classifiers.append({"func": MLPClassifier(max_iter=10000), "name": "mlp"})
# classifiers.append({"func": RandomForestClassifier(), "name": "random_forest"})
return classifiers
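A hedged sketch of how the returned list might be consumed; the dataset and split below are illustrative placeholders, not names from the original project:

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# `classifiers` as returned by __create_classifiers():
# a list of {"func": estimator, "name": str} dicts.
for entry in classifiers:
    entry["func"].fit(X_train, y_train)
    print(entry["name"], entry["func"].score(X_test, y_test))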
def __init__(self, filepath="files", is_delta_mode=False, verbose=False):
self.verbose = verbose
self.message = ""
self.filepath = filepath
self.is_delta = is_delta_mode
# Load files
try:
self.NN = pickle.load(open(self.filepath+'/model.pkl','rb'))
# Load user names
userList = open(self.filepath+"/metadata.txt", "r")
self.users = userList.read().split('\n')
userList.close()
except FileNotFoundError:
print("Model and metadata.txt not found.")
        self.mlp = MLPClassifier(hidden_layer_sizes=(50, 50, 50), activation='logistic')
    if self.verbose:
        print("Delta mode enabled =", is_delta_mode)
# Train the network and generate model.pkl file and csv file
def rede_neural(X, y):
    print("Starting neural network training")
    X2 = normalize(X)
    clf = MLPClassifier(hidden_layer_sizes=(100, 50), activation='tanh', solver='adam', alpha=1e-5,
                        learning_rate='constant', tol=1e-8, learning_rate_init=0.0002,
                        early_stopping=True, validation_fraction=0.2)
    kf = KFold(len(y), n_folds=3)  # pre-0.18 cross_validation API; see the model_selection sketch below
    i = 0
    for train, test in kf:
        start = time.time()
        i = i + 1
        print("Training fold", i)
        # split the dataset into train and test folds
        #X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.4, random_state=1)
        X_train, X_test, y_train, y_test = X2[train], X2[test], y[train], y[test]
        # fit
        clf.fit(X_train, y_train)
        print("score:", clf.score(X_test, y_test), "(", (time.time() - start) / 60.0, "minutes )")
    return clf
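The fold loop above relies on the pre-0.18 `cross_validation.KFold(n, n_folds=...)` API, which no longer exists; a sketch of the same loop with the current `model_selection` API (same `X2`, `y`, `clf` as above) would be:

from sklearn.model_selection import KFold

kf = KFold(n_splits=3)
for train, test in kf.split(X2):
    clf.fit(X2[train], y[train])
    print("score:", clf.score(X2[test], y[test]))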
def NN_model(X, target):
    '''A perceptron-style classifier for deciding whether a route should be made
    one-way or not for a particular time period.
    Parameters
    ==========
    X : int
        An integer column matrix
    Notes on the MLPClassifier settings:
    The default solver 'adam' works pretty well on relatively large datasets
    (with thousands of training samples or more) in terms of both training time
    and validation score. For small datasets, however, 'lbfgs' can converge
    faster and perform better.
    activation: 'logistic', the logistic sigmoid function, returns f(x) = 1 / (1 + exp(-x)).
    alpha: 0.0001 by default
    learning_rate: 'constant'
    max_iter : int, optional, default 200
    tol : float, optional, default 1e-4
    '''
    y = [0, 1]
    # hidden_layer_sizes must be a tuple of positive layer widths
    clf = MLPClassifier(hidden_layer_sizes=(10,), activation='logistic', solver='lbfgs',
                        early_stopping=True)
    clf.fit(X, y)
    return clf
def get_classifier(self):
algo=self.algo
if algo=="GBT":
return GradientBoostingClassifier()
elif algo=="RF":
return RandomForestClassifier()
elif algo=="ADB":
return AdaBoostClassifier()
elif algo =="DT":
return DecisionTreeClassifier()
elif algo=="NB":
return BernoulliNB()
elif algo=="SGD":
return SGDClassifier()
elif algo=="SVC":
return LinearSVC()
elif algo=="MLPC":
return MLPClassifier(activation='logistic', batch_size='auto',
early_stopping=True, hidden_layer_sizes=(100,), learning_rate='adaptive',
learning_rate_init=0.1, max_iter=5000, random_state=1,
solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
warm_start=False)
    raise ValueError('Unknown algorithm: ' + str(algo))
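A minimal sketch of using this factory, assuming an instance whose `algo` attribute was set beforehand; the instance name and the iris data are purely illustrative:

from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
model.algo = "MLPC"              # hypothetical instance of the class above
clf = model.get_classifier()
clf.fit(X, y)
print(clf.score(X, y))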
def test():
from sklearn.neural_network import MLPClassifier
records = np.random.randint(0, 2, (10, 6))
results = np.random.randint(0, 2, (10, 3))
# records = np.eye(6)
# results = records
nn = mynn()
nn2 = MLPClassifier()
nn._fit(records, results)
nn2.fit(records, results)
    print(results)
    print(nn._predict(records))
    print(nn2.predict(records))
    # print(nn.ww)
    # print(results)
    # print(nn.predict(records))
def neural_network(self, sensors_set):
features = list(self.dataset.get_sensors_set_features(sensors_set))
print("NEURAL NETWORK.....")
print("CLASSIFICATION BASED ON THESE SENSORS: ", self.dataset.get_remained_sensors(sensors_set))
print("NUMBER OF FEATURES: ", len(features))
train_features, train_classes, test_features, test_classes = self.__get_sets_for_classification(
self.dataset.get_train, self.dataset.get_test, features)
train_features_scaled, test_features_scaled = util.scale_features(train_features, test_features)
classifier_nn = MLPClassifier(hidden_layer_sizes=(const.PAR_NN_NEURONS[sensors_set],),
alpha=const.PAR_NN_ALPHA[sensors_set], max_iter=const.PAR_NN_MAX_ITER,
tol=const.PAR_NN_TOL)
classifier_nn.fit(train_features_scaled, train_classes)
test_prediction = classifier_nn.predict(test_features_scaled)
acc = accuracy_score(test_classes, test_prediction)
print("ACCURACY : " + str(acc))
print("END NEURAL NETWORK")
if not os.path.exists(const.DIR_RESULTS):
os.makedirs(const.DIR_RESULTS)
file_content = "acc\n" + str(acc)
with open(const.DIR_RESULTS + "/" + str(sensors_set) + const.FILE_NEURAL_NETWORK_RESULTS, 'w') as f:
f.write(file_content)
# support vector machine algorithm: train on the full train set, test on the full test set
def para_ann(dataframe):
### Training and Testing Set
random.seed(0)
sample_index = random.sample(list(dataframe.index),int(1*len(dataframe.index)))
para_index = random.sample(sample_index, int(0.5*len(sample_index)))
    op_df_train = dataframe.loc[para_index]
    op_df_holdout = dataframe.drop(para_index)
    columns = ['SMA_10', 'Momentum', 'stoch_K', 'WMA_10', 'MACD', 'A/D', 'Volume']
    X = op_df_train[columns].values
    Y = op_df_train['Adj Close'].values
    ### ANN model
    lbfgs_ann = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1).fit(X, Y)
    X_holdout = op_df_holdout[columns].values
    Y_holdout = op_df_holdout['Adj Close'].values
Z = pd.DataFrame(np.zeros((1,1)), columns = ['ANN with backpropagation'])
Y_result = Y_holdout
pred = lbfgs_ann.predict(X_holdout)
Y_result = np.vstack((Y_result, np.array(pred)))
Z.iloc[0,0] = sum(pred==Y_holdout)/len(pred)
Y_result = Y_result.T
return Z, Y_result
def MLPClassifier(X_train, y_train, X_test):  # note: this function shadows the sklearn class name
from sklearn.neural_network import MLPClassifier
now = datetime.datetime.now()
print ("MLPClassifier start in " + now.strftime('%Y-%m-%d %H:%M:%S'))
MLPC = MLPClassifier()
MLPC.fit(X_train, y_train)
now = datetime.datetime.now()
print ("MLPClassifier train done in " + now.strftime('%Y-%m-%d %H:%M:%S'))
y_pred_MLPC = MLPC.predict_proba(X_test)
y_pred_MLPC = pd.DataFrame(y_pred_MLPC[:,1:2],columns=['MLPC_predictions'])
y_pred_MLPC.to_csv('MLPC_result.csv', index=False)
now = datetime.datetime.now()
print ("MLPClassifier predict done in " + now.strftime('%Y-%m-%d %H:%M:%S'))
# SVC model
def MLPClassifier(X_train, y_train,X_test):
from sklearn.neural_network import MLPClassifier
now = datetime.datetime.now()
print ("MLPClassifier start in " + now.strftime('%Y-%m-%d %H:%M:%S'))
    MLPC = MLPClassifier(activation='relu', hidden_layer_sizes=(100,))
MLPC.fit(X_train, y_train)
now = datetime.datetime.now()
print ("MLPClassifier train done in " + now.strftime('%Y-%m-%d %H:%M:%S'))
y_pred_MLPC = MLPC.predict_proba(X_test)
y_pred_MLPC = pd.DataFrame(y_pred_MLPC[:,1:2],columns=['MLPC_predictions'])
y_pred_MLPC.to_csv('MLPC_result_all.csv', index=False)
now = datetime.datetime.now()
print ("MLPClassifier predict done in " + now.strftime('%Y-%m-%d %H:%M:%S'))
# SVC model
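Both variants above keep only the second column of `predict_proba`; for a binary problem that is the probability of the positive class (column order follows `clf.classes_`). A toy check of the shape convention, on made-up data:

import numpy as np
from sklearn.neural_network import MLPClassifier

rng = np.random.RandomState(0)
X = rng.rand(20, 4)
y = rng.randint(0, 2, 20)
clf = MLPClassifier(max_iter=500).fit(X, y)
proba = clf.predict_proba(X)     # shape (20, 2)
positive = proba[:, 1:2]         # keep P(class 1) as a (20, 1) column
print(positive.shape)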
def _train(self, X_matrix, y, **kwargs):
    """Train the model.
    Parameters:
        X_matrix (numpy.array): - feature matrix used for training
        y (numpy.array): - labels used for training
    Returns:
        sklearn.model: - the fitted sklearn model
    """
from sklearn.neural_network import MLPClassifier
model = MLPClassifier(**kwargs)
model.fit(X_matrix, y)
return model
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
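`plot_on_dataset` assumes module-level `labels`, `params`, and `plot_args`; the function closely resembles scikit-learn's MLP training-curves example, so a plausible (assumed, not original) definition is:

# Assumed definitions, modeled on scikit-learn's training-curves example;
# the original module's values may differ.
params = [
    {'solver': 'sgd', 'learning_rate': 'constant', 'learning_rate_init': 0.2},
    {'solver': 'sgd', 'learning_rate': 'invscaling', 'learning_rate_init': 0.2},
    {'solver': 'adam', 'learning_rate_init': 0.01},
]
labels = ["SGD, constant learning-rate", "SGD, inverse scaling", "Adam"]
plot_args = [
    {'c': 'red', 'linestyle': '-'},
    {'c': 'green', 'linestyle': '-'},
    {'c': 'blue', 'linestyle': '--'},
]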
def test_alpha():
    # Test that larger alpha yields weights closer to zero.
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
        mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_learning_rate_warmstart():
    # Tests that warm_start reuses the past solution.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
        mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_partial_fit_classification():
    # Test partial_fit on classification.
    # `partial_fit` should yield the same results as `fit` for binary and
    # multi-class classification.
    for X, y in classification_datasets:
        mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
                            tol=0, alpha=1e-5, learning_rate_init=0.2)
mlp.fit(X, y)
pred1 = mlp.predict(X)
        mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_predict_proba_binary():
    # Test that predict_proba works as expected for the binary case.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def __init__(self, path):
'''
Constructor
'''
self.path = path
self.model = MLPClassifier(solver='lbfgs', max_iter=600, hidden_layer_sizes=(300,), random_state=1)
#self.model = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(200,7), random_state=1)
#self.model = MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto',
# beta_1=0.9, beta_2=0.999, early_stopping=False,
# epsilon=1e-08, hidden_layer_sizes=(200,), learning_rate='constant',
# learning_rate_init=0.001, max_iter=300, momentum=0.9,
# nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
# solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
# warm_start=False)
self.model_name = 'mlp'
self.scaler_mlp = 'scaler'
self.scaler = StandardScaler()
def classify(train=None, test=None, data=None, res_dir="res/", disp=True, outfilename=None):
    """Compare multiple classifiers and display the best one."""
    utils.print_success("Comparison of different classifiers")
if data is not None:
train_features = data["train_features"]
train_groundtruths = data["train_groundtruths"]
test_features = data["test_features"]
test_groundtruths = data["test_groundtruths"]
else:
train = utils.abs_path_file(train)
test = utils.abs_path_file(test)
train_features, train_groundtruths = read_file(train)
test_features, test_groundtruths = read_file(test)
if not utils.create_dir(res_dir):
res_dir = utils.abs_path_dir(res_dir)
classifiers = {
"RandomForest": RandomForestClassifier(n_jobs=-1)
# "RandomForest": RandomForestClassifier(n_estimators=5),
# "KNeighbors":KNeighborsClassifier(3),
# "GaussianProcess":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
# "DecisionTree":DecisionTreeClassifier(max_depth=5),
# "MLP":MLPClassifier(),
# "AdaBoost":AdaBoostClassifier(),
# "GaussianNB":GaussianNB(),
# "QDA":QuadraticDiscriminantAnalysis(),
# "SVM":SVC(kernel="linear", C=0.025),
# "GradientBoosting":GradientBoostingClassifier(),
# "ExtraTrees":ExtraTreesClassifier(),
# "LogisticRegression":LogisticRegression(),
# "LinearDiscriminantAnalysis":LinearDiscriminantAnalysis()
}
for key in classifiers:
utils.print_success(key)
clf = classifiers[key]
utils.print_info("\tFit")
clf.fit(train_features, train_groundtruths)
utils.print_info("\tPredict")
predictions = clf.predict(test_features)
return predictions
def test_basic(self, single_chunk_classification):
X, y = single_chunk_classification
a = nn.ParitalMLPClassifier(classes=[0, 1], random_state=0)
b = nn_.MLPClassifier(random_state=0)
a.fit(X, y)
b.partial_fit(X, y, classes=[0, 1])
assert_estimator_equal(a, b)
def remake_mlp(self, event=None):
sizes = tuple([int(s.strip()) for s in str(self.layer_sizes.text()).split(',')])
alpha = float(self.alpha_var.text())
self.mlp = neural_network.MLPClassifier(hidden_layer_sizes=sizes, alpha=alpha)
def function_approx():
# init
clf = MLPClassifier(solver='sgd', alpha=1e-5,
                        activation='relu', hidden_layer_sizes=(10,),
learning_rate='constant', learning_rate_init=0.001,
random_state=1, early_stopping=False,
verbose=True)
def fn(x, y):
return round(x + y)
# train
_MAX = 3
X = []
y = []
for i in range(1000):
_x, _y = random.randint(0, _MAX), random.randint(0, _MAX)
#_xnoise, _ynoise = random.random(), random.random()
_xnoise, _ynoise = 0, 0
X.append([_x / _MAX + _xnoise, _y / _MAX + _ynoise])
y.append(fn(_x, _y))
print(X)
print(y)
clf.fit(X, y)
print("weights:", clf.coefs_)
print("biases: ", clf.intercepts_)
# classify
for i in range(10):
_x, _y = random.uniform(0, _MAX), random.uniform(0, _MAX)
classification = clf.predict([[_x / _MAX, _y / _MAX]])
print("Classified {} as {} (should be {})".format(
[_x, _y], classification, fn(_x, _y)))
def mnist():
    #digits = datasets.load_digits() # subsampled version
    # mldata.org is defunct; fetch_openml is the current replacement for fetch_mldata
    mnist = datasets.fetch_openml('mnist_784', as_frame=False)
    print("Got the data.")
    X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
#images_and_labels = list(zip(digits.images, digits.target))
#for index, (image, label) in enumerate(images_and_labels[:4]):
# plt.subplot(2, 4, index + 1)
# plt.axis('off')
# plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
# plt.title('Training: %i' % label)
classifiers = [
#("SVM", svm.SVC(gamma=0.001)), # TODO doesn't finish; needs downsampled version?
("NN", MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)),
]
for name, classifier in classifiers:
print(name)
classifier.fit(X_train, y_train)
predicted = classifier.predict(X_test)
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(y_test, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(y_test, predicted))
#images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
#for index, (image, prediction) in enumerate(images_and_predictions[:4]):
# plt.subplot(2, 4, index + 5)
# plt.axis('off')
# plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
# plt.title('Prediction: %i' % prediction)
#plt.show()
def MakeClassification(index, instancesData, classesData, instancesTest, type="proba", classifiersType="normal"):
    classifiers = [
        OneVsRestClassifier(sklearn.svm.SVC(probability=1), n_jobs=4),
        DecisionTreeClassifier(random_state=0),
        KNeighborsClassifier(n_jobs=4),
        MLPClassifier(),
        sklearn.svm.SVC(probability=1, decision_function_shape="ovo"),
        OutputCodeClassifier(LinearSVC(random_state=0), code_size=2, random_state=0)
    ]
    if (classifiersType == "ova"):
        classifiers = [
            OneVsRestClassifier(sklearn.svm.SVC(probability=1), n_jobs=4),
            OneVsRestClassifier(DecisionTreeClassifier(random_state=0), n_jobs=4),
            OneVsRestClassifier(KNeighborsClassifier(), n_jobs=4),
            OneVsRestClassifier(MLPClassifier(), n_jobs=4),
            OneVsRestClassifier(GaussianNB(), n_jobs=4)
        ]
    if (index >= len(classifiers)):
        print("ERROR. The index is not valid.")
        return None
    else:
        #print("Performing classification")
        if type == "proba":
            return classifiers[index].fit(instancesData, classesData).predict_proba(instancesTest)
        else:
            return classifiers[index].fit(instancesData, classesData).predict(instancesTest)
def __init__(self):
SingleClassifier.SingleClassifier.__init__(self)
# weak classifier
self.clf = MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto',
beta_1=0.9, beta_2=0.999, early_stopping=False,
epsilon=1e-08, hidden_layer_sizes=(5, 2), learning_rate='constant',
learning_rate_init=0.001, max_iter=200, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
warm_start=False)
def learn(x, y, test_x):
(temp_x, temp_y) = tools.simple_negative_sample(x, y, variables.select_rate_nn)
    clf = MLPClassifier(hidden_layer_sizes=(variables.unit_num_nn,), random_state=2017, max_iter=2000,
                        alpha=variables.alpha_nn,
                        learning_rate_init=variables.learning_rate_init_nn, solver="adam",
                        activation="relu").fit(temp_x, temp_y)
    prediction_list = clf.predict(test_x)
    prediction_list_prob = clf.predict_proba(test_x)
    return prediction_list, prediction_list_prob
def __init__(self, genres, data, type='knn', name='', clf_kwargs=None):
self.logger = get_logger('classifier')
self.display_name = name
self.genres = genres
self.m_genres = { genre:i for i, genre in enumerate(genres) }
self.randstate = np.random.RandomState()
self.scaler = StandardScaler()
clf_kwargs = { } if not clf_kwargs else clf_kwargs
if type in ['svm', 'mlp']:
clf_kwargs['random_state'] = self.randstate
if type == 'knn':
self.proto_clf = KNeighborsClassifier(**clf_kwargs)
elif type == 'svm':
self.proto_clf = SVC(**clf_kwargs)
elif type == 'dtree':
self.proto_clf = DecisionTreeClassifier(**clf_kwargs)
elif type == 'gnb':
self.proto_clf = GaussianNB(**clf_kwargs)
elif type == 'perc':
self.proto_clf = Perceptron(**clf_kwargs)
elif type == 'mlp':
self.proto_clf = MLPClassifier(**clf_kwargs)
elif type == 'ada':
self.proto_clf = AdaBoostClassifier(**clf_kwargs)
else:
raise LookupError('Classifier type "{}" is invalid'.format(type))
self._convert_data(data)
self.logger.info('Classifier: {} (params={})'.format(
self.proto_clf.__class__.__name__,
clf_kwargs
))
def __init__(self):
params = dict(clf__hidden_layer_sizes=[(50,), (70,), (100,)], clf__solver=['lbfgs', 'adam', 'sgd'])
super().__init__(neural_network.MLPClassifier(), params, 'MLPClassifier')
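The `clf__`-prefixed keys indicate that the parent class wraps the estimator in a `Pipeline` step named `clf` before grid-searching it; a hedged sketch of that pattern (the pipeline and search below are assumptions, not the original base class):

from sklearn import neural_network
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

pipe = Pipeline([('scale', StandardScaler()),
                 ('clf', neural_network.MLPClassifier(max_iter=1000))])
param_grid = dict(clf__hidden_layer_sizes=[(50,), (70,), (100,)],
                  clf__solver=['lbfgs', 'adam', 'sgd'])
search = GridSearchCV(pipe, param_grid, cv=3)
# search.fit(X, y) then selects the best hidden-layer size / solver combination.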