def parameterChoosing(self):
    # Set the parameters by cross-validation
    tuned_parameters = [{'penalty': ['l1'],
                         'C': np.logspace(-5, 5)},
                        {'penalty': ['l2'],
                         'C': np.logspace(-5, 5)}]

    clf = GridSearchCV(linear_model.LogisticRegression(tol=1e-6),
                       tuned_parameters, cv=5, scoring='precision_weighted')
    clf.fit(self.X_train, self.y_train.ravel())

    print("Best parameters set found on development set:\n")
    print(clf.best_params_)

    print("Grid scores on development set:\n")
    results = clf.cv_results_  # grid_scores_ was removed in sklearn 0.20
    for mean, std, params in zip(results['mean_test_score'],
                                 results['std_test_score'],
                                 results['params']):
        print("%0.3f (+/-%0.03f) for %r\n" % (mean, std * 2, params))

    print("Detailed classification report:\n")
    y_true, y_pred = self.y_test, clf.predict(self.X_test)
    print(classification_report(y_true, y_pred))
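A note on the grid above: np.logspace(-5, 5) uses the default num=50, so each penalty is searched over 50 log-spaced C values from 1e-5 to 1e5. A quick sanity check:

import numpy as np

C_grid = np.logspace(-5, 5)   # defaults to num=50 points
print(len(C_grid))            # 50
print(C_grid[0], C_grid[-1])  # 1e-05 100000.0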
def calc_IndCurrent_FD_spectrum(self):
    """Gives FD induced current spectrum"""
    # Initialize attributes
    Bpx = self.Bpx
    Bpz = self.Bpz
    a2 = self.a2
    azm = np.pi*self.azm/180.
    R = self.R
    L = self.L

    w = 2*np.pi*np.logspace(0, 8, 101)
    Ax = np.pi*a2**2*np.sin(azm)
    Az = np.pi*a2**2*np.cos(azm)
    Phi = (Ax*Bpx + Az*Bpz)
    EMF = -1j*w*Phi
    Is = EMF/(R + 1j*w*L)

    return EMF, Is
def WaveVelandSkindWidget(epsr, sigma):
    frequency = np.logspace(1, 9, 61)
    vel, skind = WaveVelSkind(frequency, epsr, 10**sigma)
    figure, ax = plt.subplots(1, 2, figsize=(10, 4))
    ax[0].loglog(frequency, vel, 'b', lw=3)
    ax[1].loglog(frequency, skind, 'r', lw=3)
    ax[0].set_ylim(1e6, 1e9)
    ax[1].set_ylim(1e-1, 1e7)
    ax[0].set_xlabel('Frequency (Hz)')
    ax[0].set_ylabel('Velocity (m/s)')
    ax[1].set_xlabel('Frequency (Hz)')
    ax[1].set_ylabel('Skin Depth (m)')
    ax[0].grid(True)
    ax[1].grid(True)
    plt.show()
def process(self, **kwargs):
    """Process module."""
    self._rest_times = kwargs['rest_times']
    self._rest_t_explosion = kwargs[self.key('resttexplosion')]

    outputs = OrderedDict()
    max_times = max(self._rest_times)
    if max_times > self._rest_t_explosion:
        outputs['dense_times'] = np.unique(
            np.concatenate(([0.0], [
                x + self._rest_t_explosion
                for x in np.logspace(
                    self.L_T_MIN,
                    np.log10(max_times - self._rest_t_explosion),
                    num=self._n_times)
            ], self._rest_times)))
    else:
        outputs['dense_times'] = np.array(self._rest_times)
    outputs['dense_indices'] = np.searchsorted(
        outputs['dense_times'], self._rest_times)
    return outputs
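A rough sketch of the time grid this builds, with hypothetical stand-ins for the class attributes self.L_T_MIN and self._n_times (their actual values live elsewhere in the class):

import numpy as np

rest_times = [1.0, 10.0, 40.0]
t_exp = 0.5
L_T_MIN, n_times = -3, 8   # hypothetical stand-ins

dense_times = np.unique(np.concatenate((
    [0.0],
    [x + t_exp for x in np.logspace(L_T_MIN,
                                    np.log10(max(rest_times) - t_exp),
                                    num=n_times)],
    rest_times)))
dense_indices = np.searchsorted(dense_times, rest_times)
print(dense_times)    # 0.0, eight log-spaced post-explosion points, and rest_times
print(dense_indices)  # positions of rest_times within the dense grid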
def main(table, schema):
    logger = get_root_logger()
    _ = get_header(logger, 'Building a model to predict Trump tweets')

    loc = get_path(__file__) + '/{0}'
    params = {
        'features__text_processing__vect__ngram_range': [(1, 1), (1, 2), (1, 3)],
        'clf__n_estimators': [int(x) for x in logspace(1, 3, num=10)],
    }

    model = RandomForestModel(table, schema, **params)
    model.train()
    model.evaluate()
    model.save(loc.format('saved_models'))
def _get_knot_spacing(self):
    """Returns a list of knot locations based on the spline parameters.

    If the option `spacing` is 'lin', uses linear spacing;
    if 'log', uses log spacing.
    Places 'spline_N' knots between 'spline_min' and 'spline_max'.
    """
    space_key = self.get_option('spacing').lower()[:3]

    if space_key == 'log':
        vol = np.logspace(np.log10(self.get_option('spline_min')),
                          np.log10(self.get_option('spline_max')),
                          self.get_option('spline_N'))
    elif space_key == 'lin':
        vol = np.linspace(self.get_option('spline_min'),
                          self.get_option('spline_max'),
                          self.get_option('spline_N'))
    else:
        raise KeyError("{:} only `lin`ear and `log` spacing are "
                       "accepted".format(self.get_inform(1)))
    # end

    return vol
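The two branches above differ only in how the knots are distributed between spline_min and spline_max; a minimal sketch of the contrast (bounds and count chosen arbitrarily):

import numpy as np

lin_knots = np.linspace(0.1, 100.0, 5)
log_knots = np.logspace(np.log10(0.1), np.log10(100.0), 5)
print(lin_knots)  # equal differences between consecutive knots
print(log_knots)  # equal ratios between consecutive knots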
def plot():
    """Benchmark planetplanet against batman and pysyzygy across dataset sizes
    and plot the timings."""
    # Register the functions
    builtins.__dict__.update(globals())

    # Loop over various dataset sizes
    Narr = np.logspace(0, 5, 5)
    tpp = np.zeros_like(Narr)
    tbm = np.zeros_like(Narr)
    tps = np.zeros_like(Narr)
    for i, N in enumerate(Narr):
        tpp[i] = timeit.timeit('run_pp(%d)' % N, number=10) / 10.
        if batman is not None:
            tbm[i] = timeit.timeit('run_bm(%d)' % N, number=10) / 10.
        if ps is not None:
            tps[i] = timeit.timeit('run_ps(%d)' % N, number=10) / 10.

    pl.plot(Narr, tpp, '-o', label='planetplanet')
    if batman is not None:
        pl.plot(Narr, tbm, '-o', label='batman')
    if ps is not None:
        pl.plot(Narr, tps, '-o', label='pysyzygy')
    pl.legend()
    pl.yscale('log')
    pl.xscale('log')
    pl.ylabel('Time [seconds]', fontweight='bold')
    pl.xlabel('Number of datapoints', fontweight='bold')
def fit_koff(nmax=523, NN=4e8, **params):
    tbind = params.pop("tbind")
    params["kd"] = 1e9/tbind
    dx = params.pop("dx")
    rw = randomwalk.get_rw(NAME, params, setup=setup_rw, calc=True)
    rw.domains[1].dx = dx
    times = draw_empirically(rw, N=NN, nmax=nmax, success=False)
    bins = np.logspace(np.log10(min(times)), np.log10(max(times)), 35)
    #bins = np.logspace(-3., 2., 35)
    hist, _ = np.histogram(times, bins=bins)
    cfd = np.cumsum(hist)/float(np.sum(hist))
    t = 0.5*(bins[:-1] + bins[1:])
    tmean = times.mean()
    toff = NLS(t, cfd, t0=tmean)
    koff = 1./toff
    return dict(t=t, cfd=cfd, toff=toff, tmean=tmean, koff=koff)

##### run rw in collect mode and draw bindings from empirical distributions
def exponential_hist(times, a, b, **params):
    cutoff = 0.03  # cutoff frequency in ms
    if len(times) == 0:
        return
    bins = np.logspace(a, b, 100)
    hist = plt.hist(times, bins=bins, alpha=0.5, **params)
    plt.xscale("log")
    params.pop("label")
    color = params.pop("color")
    total = integrate_hist(hist, cutoff)
    if sum(times > cutoff) == 0:
        return
    tmean = times[times > cutoff].mean()
    T = np.logspace(a-3, b, 1000)
    fT = np.exp(-T/tmean)*T/tmean
    fT *= total/integrate_values(T, fT, cutoff)
    plt.plot(T, fT, label="exp. fit, mean = %.2f ms" % (tmean,),
             color="dark" + color, **params)
    plt.xlim(10**a, 10**b)
def _update_data_x(self):
    if self.is_zero_span():
        self._data_x = np.zeros(self.points)
        # data_x will be measured during first scan...
        return

    if self.logscale:
        raw_values = np.logspace(
            np.log10(self.start_freq),
            np.log10(self.stop_freq),
            self.points,
            endpoint=True)
    else:
        raw_values = np.linspace(self.start_freq,
                                 self.stop_freq,
                                 self.points,
                                 endpoint=True)

    values = np.zeros(len(raw_values))
    for index, val in enumerate(raw_values):
        values[index] = self.iq.__class__.frequency. \
            validate_and_normalize(self, val)  # retrieve the real freqs...
    self._data_x = values
def MieQ_withWavelengthRange(m, diameter, wavelengthRange=(100, 1600), nw=1000, logW=False):
    # http://pymiescatt.readthedocs.io/en/latest/forward.html#MieQ_withWavelengthRange
    if type(m) == complex and len(wavelengthRange) == 2:
        if logW:
            wavelengths = np.logspace(np.log10(wavelengthRange[0]), np.log10(wavelengthRange[1]), nw)
        else:
            wavelengths = np.linspace(wavelengthRange[0], wavelengthRange[1], nw)
        _qD = [AutoMieQ(m, wavelength, diameter) for wavelength in wavelengths]
    elif type(m) in [np.ndarray, list, tuple] and len(wavelengthRange) == len(m):
        wavelengths = wavelengthRange
        _qD = [MieQ(emm, wavelength, diameter) for emm, wavelength in zip(m, wavelengths)]
    else:
        warnings.warn("Error: the size of the input data is mismatched. Please examine your inputs and try again.")
        return

    qext = np.array([q[0] for q in _qD])
    qsca = np.array([q[1] for q in _qD])
    qabs = np.array([q[2] for q in _qD])
    g = np.array([q[3] for q in _qD])
    qpr = np.array([q[4] for q in _qD])
    qback = np.array([q[5] for q in _qD])
    qratio = np.array([q[6] for q in _qD])
    return wavelengths, qext, qsca, qabs, g, qpr, qback, qratio
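A possible call to the function above, assuming the PyMieScatt helpers it uses (AutoMieQ, MieQ) are in scope; the refractive index and diameter are arbitrary example values:

m = 1.536 + 0.0015j  # hypothetical complex refractive index
out = MieQ_withWavelengthRange(m, 200, wavelengthRange=(300, 800), nw=50, logW=True)
wavelengths, qext, qsca, qabs, g, qpr, qback, qratio = out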
def MieQ_withSizeParameterRange(m, xRange=(1, 10), nx=1000, logX=False):
    # http://pymiescatt.readthedocs.io/en/latest/forward.html#MieQ_withSizeParameterRange
    if logX:
        xValues = list(np.logspace(np.log10(xRange[0]), np.log10(xRange[1]), nx))
    else:
        xValues = list(np.linspace(xRange[0], xRange[1], nx))
    dValues = [1000*x/np.pi for x in xValues]
    _qD = [AutoMieQ(m, 1000, d) for d in dValues]

    qext = np.array([q[0] for q in _qD])
    qsca = np.array([q[1] for q in _qD])
    qabs = np.array([q[2] for q in _qD])
    g = np.array([q[3] for q in _qD])
    qpr = np.array([q[4] for q in _qD])
    qback = np.array([q[5] for q in _qD])
    qratio = np.array([q[6] for q in _qD])
    return xValues, qext, qsca, qabs, g, qpr, qback, qratio
def grid_search_gamma(rbf_svm, X, y):
    ## grid search - gamma only
    # use a full grid over all parameters
    param_grid = {'gamma': np.logspace(-15, 4, num=5000, base=2.0)}
    grid_search = GridSearchCV(rbf_svm, param_grid=param_grid, scoring='roc_auc',
                               cv=10, pre_dispatch='2*n_jobs', n_jobs=-1)
    # re-fit on the whole training data
    grid_search.fit(X, y)
    # grid_scores_ was removed in sklearn 0.20; use cv_results_ instead
    grid_search_scores = grid_search.cv_results_['mean_test_score']
    print('Best parameters : {}'.format(grid_search.best_params_))
    print('Best score : {}'.format(grid_search.best_score_))

    # set canvas
    fig, ax = plt.subplots(1, 1)
    # ax.scatter(X[:, 0], X[:, 1], c = y)
    ax.plot(param_grid['gamma'], grid_search_scores)
    ax.set_title('AUC = f(gamma, C = 1.0)', fontsize='large')
    ax.set_xlabel('gamma', fontsize='medium')
    ax.set_ylabel('AUC', fontsize='medium')
    return fig
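Note the base=2.0 argument above: np.logspace raises the base to linearly spaced exponents, so the grid spans 2**-15 through 2**4. A coarser version for illustration:

import numpy as np

gammas = np.logspace(-15, 4, num=5, base=2.0)
print(np.log2(gammas))        # [-15.   -10.25  -5.5   -0.75   4.  ] -- evenly spaced exponents
print(gammas[0], gammas[-1])  # 3.0517578125e-05 16.0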
def test_cv():
    """Simple CV check."""
    # XXX: don't use scikit-learn for tests.
    X, y = make_regression()
    cv = KFold(n_splits=5)  # modern sklearn API; older versions took (n, n_folds)

    glm_normal = GLM(distr='gaussian', alpha=0.01, reg_lambda=0.1)
    # check that it returns 5 scores
    scores = cross_val_score(glm_normal, X, y, cv=cv)
    assert_equal(len(scores), 5)

    param_grid = [{'alpha': np.linspace(0.01, 0.99, 2)},
                  {'reg_lambda': np.logspace(np.log(0.5), np.log(0.01),
                                             10, base=np.exp(1))}]
    glmcv = GridSearchCV(glm_normal, param_grid, cv=cv)
    glmcv.fit(X, y)
def test_l1l2path():
    X_file = 'data_c/X_200_100.csv'
    Y_file = 'data_c/Y_200_100.csv'
    X = np.genfromtxt(X_file)
    Y = np.genfromtxt(Y_file)

    mu = 1e-3
    tau_range = np.logspace(-2, 0, 3)
    k_max = 10000
    tolerance = 1e-4

    pc = pplus.PPlusConnection(debug=False, workers_servers=('127.0.0.1',))
    pc.submit(l1l2path_job,
              args=(X, Y, mu, tau_range, k_max, tolerance),
              modules=('numpy as np', 'ctypes'))
    result_keys = pc.collect()

    print(result_keys)
    print("Done")
def nextfastpower(n):
    """Return the next integral power of small factors greater than the given
    number.  Specifically, return m such that
        m >= n
        m == 2**x * 3**y * 5**z
    where x, y, and z are integers.
    This is useful for ensuring fast FFT sizes.
    From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)
    """
    if n < 7:
        return max(n, 1)
    # x, y, and z are all bounded from above by the formula of nextpower.
    # Compute all possible combinations for powers of 3 and 5.
    # (Not too many for reasonable FFT sizes.)
    def power_series(x, base):
        nmax = ceil(log(x) / log(base))
        return np.logspace(0.0, nmax, num=nmax+1, base=base)
    n35 = np.outer(power_series(n, 3.0), power_series(n, 5.0))
    n35 = n35[n35 <= n]
    # Lump the powers of 3 and 5 together and solve for the powers of 2.
    n2 = nextpower(n / n35)
    return int(min(n2 * n35))
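nextfastpower depends on a nextpower helper that is not shown in this listing; a minimal stand-in consistent with the docstring (the smallest power of the base that is >= n, applied elementwise), plus a usage check:

import numpy as np

def nextpower(n, base=2.0):
    """Smallest integral power of `base` that is >= n (elementwise)."""
    return base ** np.ceil(np.log(n) / np.log(base))

print(nextfastpower(1234))  # 1250 == 2 * 5**4, the next 5-smooth FFT size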
def _generate_segments(self, n_segments, n_superpositions=5):
    # Assume that the actual surface is a superposition of sinusoid
    # functions, from which we sample n_segments points and connect those
    # linearly.
    # Generate sinusoids of the form -5 * sin(a * x + b)
    a = np.logspace(0, 0.5, n_superpositions)
    b = (0.25 * self.random_state.rand(n_superpositions) - 0.125) * np.pi
    # Generate x and y components of segments
    x = np.hstack((np.sort(self.random_state.rand(n_segments) * 8.0)))
    y = (-5 * np.sin(a * x[:, None] + b)).mean(axis=1)
    # Start at (0, 0)
    x[0] = y[0] = 0
    # Planar segment at the end which is long enough to avoid shooting
    # over the border
    x[-1] = 100.0
    y[-1] = y[-2]
    return np.vstack((x, y)).T
def parameterChoosing(self):
    # Set the parameters by cross-validation
    tuned_parameters = [{'alpha': np.logspace(-5, 5)}]

    # 'mean_squared_error' was renamed to 'neg_mean_squared_error' in sklearn 0.18
    reg = GridSearchCV(linear_model.Ridge(alpha=0.5), tuned_parameters,
                       cv=5, scoring='neg_mean_squared_error')
    reg.fit(self.X_train, self.y_train)

    print("Best parameters set found on development set:\n")
    print(reg.best_params_)

    print("Grid scores on development set:\n")
    results = reg.cv_results_  # grid_scores_ was removed in sklearn 0.20
    for mean, std, params in zip(results['mean_test_score'],
                                 results['std_test_score'],
                                 results['params']):
        print("%0.3f (+/-%0.03f) for %r\n" % (mean, std * 2, params))
    print(reg.scorer_)

    print("MSE for test data set:")
    y_true, y_pred = self.y_test, reg.predict(self.X_test)
    print(mean_squared_error(y_true, y_pred))
def parameterChoosing(self):
    # Set the parameters by cross-validation
    tuned_parameters = [{'kernel': ['rbf'],
                         'gamma': np.logspace(-4, 3, 30),
                         'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000]},
                        {'kernel': ['poly'],
                         'degree': [1, 2, 3, 4],
                         'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000],
                         'coef0': np.logspace(-4, 3, 30)},
                        {'kernel': ['linear'],
                         'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000]}]

    clf = GridSearchCV(svm.SVC(C=1), tuned_parameters, cv=5, scoring='precision_weighted')
    clf.fit(self.X_train, self.y_train.ravel())

    print("Best parameters set found on development set:\n")
    print(clf.best_params_)

    print("Grid scores on development set:\n")
    results = clf.cv_results_  # grid_scores_ was removed in sklearn 0.20
    for mean, std, params in zip(results['mean_test_score'],
                                 results['std_test_score'],
                                 results['params']):
        print("%0.3f (+/-%0.03f) for %r\n" % (mean, std * 2, params))

    print("Detailed classification report:\n")
    y_true, y_pred = self.y_test, clf.predict(self.X_test)
    print(classification_report(y_true, y_pred))
def b2t(tb, n=1e2, logger=None, **kwargs):
    tb = np.array(tb)
    if tb.shape == ():
        return a2t(b2a(tb))  # scalar input
    if len(tb) < n:
        n = len(tb)
    tbs = -1.*np.logspace(np.log10(-tb.min()),
                          np.log10(-tb.max()), int(n))
    ages = []
    for i, tbi in enumerate(tbs):
        ages.append(a2t(b2a(tbi)))
        if logger:
            logger(i)
    ages = np.array(ages)
    return tbs, ages
def run(n_seeds, n_jobs, _run, _seed):
    seed_list = check_random_state(_seed).randint(np.iinfo(np.uint32).max,
                                                  size=n_seeds)
    exps = []
    exps += [{'method': 'sgd',
              'step_size': step_size}
             for step_size in np.logspace(-3, 3, 7)]
    exps += [{'method': 'gram',
              'reduction': reduction}
             for reduction in [1, 4, 6, 8, 12, 24]]

    rundir = join(basedir, str(_run._id), 'run')
    if not os.path.exists(rundir):
        os.makedirs(rundir)

    Parallel(n_jobs=n_jobs,
             verbose=10)(delayed(single_run)(config_updates, rundir, i)
                         for i, config_updates in enumerate(exps))
def run(n_seeds, n_jobs, _run, _seed):
    seed_list = check_random_state(_seed).randint(np.iinfo(np.uint32).max,
                                                  size=n_seeds)
    exps = []
    exps += [{'method': 'sgd',
              'step_size': step_size}
             for step_size in np.logspace(-7, -7, 1)]
    exps += [{'method': 'gram',
              'reduction': reduction}
             for reduction in [12]]

    rundir = join(basedir, str(_run._id), 'run')
    if not os.path.exists(rundir):
        os.makedirs(rundir)

    Parallel(n_jobs=n_jobs,
             verbose=10)(delayed(single_run)(config_updates, rundir, i)
                         for i, config_updates in enumerate(exps))
def create_matrix_sparse_from_conf(conf):
    restypes = ['tdnn', 'lpfb']
    if 'restype' not in conf or conf['restype'] not in restypes:
        return None
    else:
        if conf['restype'] == 'tdnn':
            # time-delay network: reservoir weights are ones on the first subdiagonal
            w_ = spa.dia_matrix(np.diag(np.ones((conf['N']-1,)), k=-1))
            return w_
        elif conf['restype'] == 'lpfb':
            # low-pass filter bank: leaky-integrator coefficients on the diagonal
            # w_ = spa.dia_matrix(np.diag(1 - (np.logspace(1e-3, 1e-1, conf['N']) - 1), k=0))
            w_ = spa.dia_matrix(np.diag(1 - np.exp(np.linspace(-6, -0.69, conf['N'])), k=0))
            return w_
    return None
################################################################################
# Standalone class for learning rules
# - Recursive Least Squares (RLS, depends on rlspy.py): the vanilla online
#   supervised reservoir training method
# - First-order reduced and controlled error, or FORCE, learning (Sussillo & Abbott, 2012)
# - FORCEmdn: Mixture density output layer using the FORCE rule (Berthold, 2017)
# - Exploratory Hebbian learning (Legenstein & others, 2010)
def selectFixedOrLog(self):
    """
    Check fixed or log-linear asymmetry parameter
    """
    self.parameters['fixed_p'] = self.ui.radioButtonFixedP.isChecked()

    if self.parameters['fixed_p']:
        self.ui.radioButtonFixedP.setChecked(True)
        self.ui.radioButtonLogLinearP.setChecked(False)
        self.ui.frame_2.setEnabled(False)
        self.ui.frame.setEnabled(True)
        # self.p = lambda x: self.ui.spinBoxP.value()
        self.parameters['asym_param'] = self.ui.spinBoxP.value()
    else:
        self.ui.radioButtonFixedP.setChecked(False)
        self.ui.radioButtonLogLinearP.setChecked(True)
        self.ui.frame_2.setEnabled(True)
        self.ui.frame.setEnabled(False)
        self.parameters['asym_param'] = \
            lambda x: _np.logspace(_np.log10(self.parameters['asym_param_start']),
                                   _np.log10(self.parameters['asym_param_end']), x)

    self.changed.emit()
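In the log-linear branch, asym_param becomes a callable returning x log-spaced values between the start and end parameters. For example, with hypothetical bounds:

import numpy as np

start, end = 1e-2, 1e2  # hypothetical asym_param_start / asym_param_end
asym_param = lambda x: np.logspace(np.log10(start), np.log10(end), x)
print(asym_param(5))  # [1.e-02 1.e-01 1.e+00 1.e+01 1.e+02]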
def getOpts(opts):
    print("config opts...")
    opts['validation'] = 0.1
    opts['exemplarSize'] = 127
    opts['instanceSize'] = 255 - 2*8
    opts['lossRPos'] = 16
    opts['lossRNeg'] = 0
    opts['labelWeight'] = 'balanced'
    opts['numPairs'] = 53200
    opts['frameRange'] = 100
    opts['trainNumEpochs'] = 50
    opts['trainLr'] = np.logspace(-2, -5, opts['trainNumEpochs'])
    opts['trainWeightDecay'] = 5e-04
    opts['randomSeed'] = 1
    opts['momentum'] = 0.9
    opts['stddev'] = 0.01
    opts['start'] = 0
    opts['expName'] = '_20170511_s_tn_001'
    opts['summaryFile'] = './data_20170511/' + opts['expName']
    opts['ckptPath'] = './ckpt/' + opts['expName']
    return opts
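trainLr above is a per-epoch schedule: 50 log-spaced learning rates decaying from 1e-2 to 1e-5, i.e. each epoch multiplies the rate by a constant factor of 10**(-3/49):

import numpy as np

lr = np.logspace(-2, -5, 50)
print(lr[0], lr[-1])  # 0.01 1e-05
print(lr[1] / lr[0])  # ~0.8685, the constant per-epoch decay factor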
def getOpts(opts):
    print("config opts...")
    opts['validation'] = 0.1
    opts['exemplarSize'] = 127
    opts['instanceSize'] = 255 - 2*8
    opts['lossRPos'] = 16
    opts['lossRNeg'] = 0
    opts['labelWeight'] = 'balanced'
    opts['numPairs'] = 53200
    opts['frameRange'] = 100
    opts['trainNumEpochs'] = 50
    opts['trainLr'] = np.logspace(-2, -5, opts['trainNumEpochs'])
    opts['trainWeightDecay'] = 5e-04
    opts['randomSeed'] = 1
    opts['momentum'] = 0.9
    opts['stddev'] = 0.01
    opts['start'] = 0
    opts['expName'] = '20170518_tn_o_001'
    opts['summaryFile'] = './data_20170518/' + opts['expName']
    opts['ckptPath'] = './ckpt/' + opts['expName']
    return opts
def CAL_v(name, label_p, label_n, oracle, n_features, ftype, test_x, test_y):
    online = OnlineBase(name, label_p, label_n, oracle, n_features, ftype, error=.5)
    x, y = online.collect_pts(100, -1)
    i = 0
    q = online.get_n_query()

    C_range = np.logspace(-2, 5, 10, base=10)
    gamma_range = np.logspace(-5, 1, 10, base=10)
    param_grid = dict(gamma=gamma_range, C=C_range)

    while q < 3500:
        i += 1
        # h_ = ex.fit(x, y)
        # modern sklearn API; older versions took (y, n_iter=...)
        cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
        grid = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv, verbose=0, n_jobs=-1)
        grid.fit(x, y)
        h_ = grid.best_estimator_

        online_ = OnlineBase('', label_p, label_n, h_.predict, n_features, ftype, error=.1)
        x_, _ = online_.collect_pts(10, 200)
        if x_ is not None and len(x_) > 0:
            x.extend(x_)
            y.extend(oracle(x_))
        q += online_.get_n_query()

        pred_y = h_.predict(test_x)
        print(len(x), q, sm.accuracy_score(test_y, pred_y))
def grid_retrain_in_f(self, n_dim=500):
    # RBFSampler's first positional argument is gamma, so n_dim must be
    # passed as n_components to get an n_dim-dimensional feature map
    rbf_map = RBFSampler(n_components=n_dim, random_state=1)
    fourier_approx_svm = pipeline.Pipeline([("mapper", rbf_map),
                                            ("svm", LinearSVC())])

    # C_range = np.logspace(-5, 15, 21, base=2)
    # gamma_range = np.logspace(-15, 3, 19, base=2)
    # param_grid = dict(mapper__gamma=gamma_range, svm__C=C_range)
    # cv = StratifiedShuffleSplit(Y, n_iter=5, test_size=0.2, random_state=42)
    # grid = GridSearchCV(fourier_approx_svm, param_grid=param_grid, cv=cv)
    # grid.fit(X, Y)
    #
    # rbf_svc2 = grid.best_estimator_
    rbf_svc2 = fourier_approx_svm
    rbf_svc2.fit(self.X_ex, self.y_ex)

    self.set_clf2(rbf_svc2)
    return self.benchmark()