Python metrics() example source code

lens.py (project: sakmapper, author: szairis)
import pandas as pd
from sklearn import decomposition, manifold, metrics

def apply_lens(df, lens='pca', dist='euclidean', n_dim=2, **kwargs):
    """
    input: N x F dataframe of observations
    output: N x n_dim image of input data under lens function
    """
    # raising a bare string is a TypeError in Python 3; raise real exceptions
    if n_dim != 2:
        raise ValueError('image of data set must be two-dimensional')
    if dist not in ['euclidean', 'correlation']:
        raise ValueError('only euclidean and correlation distance metrics are supported')
    if lens == 'pca' and dist != 'euclidean':
        raise ValueError('PCA requires the euclidean distance metric')

    if lens == 'pca':
        df_lens = pd.DataFrame(decomposition.PCA(n_components=n_dim, **kwargs).fit_transform(df), index=df.index)
    elif lens == 'mds':
        D = metrics.pairwise.pairwise_distances(df, metric=dist)
        df_lens = pd.DataFrame(manifold.MDS(n_components=n_dim, **kwargs).fit_transform(D), index=df.index)
    elif lens == 'neighbor':
        D = metrics.pairwise.pairwise_distances(df, metric=dist)
        df_lens = pd.DataFrame(manifold.SpectralEmbedding(n_components=n_dim, **kwargs).fit_transform(D), index=df.index)
    else:
        raise ValueError('only PCA, MDS, and neighborhood lenses are supported')

    return df_lens
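A minimal usage sketch on synthetic data (an assumption; any numeric N x F DataFrame works). Note that for the 'mds' lens you can pass dissimilarity='precomputed' through **kwargs so MDS treats D as a distance matrix.

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(100, 5))
projected = apply_lens(df, lens='pca')
print(projected.shape)  # (100, 2)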
model_eval.py (project: healthcareai-py, author: HealthCatalyst)
def calculate_regression_metrics(trained_sklearn_estimator, x_test, y_test):
    """
    Given a trained estimator, calculate metrics.

    Args:
        trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
        x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)
        y_test (numpy.ndarray): A 1d numpy array of the y_test set (ground-truth targets)

    Returns:
        dict: A dictionary of metrics objects
    """
    # Get predictions
    predictions = trained_sklearn_estimator.predict(x_test)

    # Calculate individual metrics
    mean_squared_error = skmetrics.mean_squared_error(y_test, predictions)
    mean_absolute_error = skmetrics.mean_absolute_error(y_test, predictions)

    result = {'mean_squared_error': mean_squared_error, 'mean_absolute_error': mean_absolute_error}

    return result
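A hedged usage sketch: skmetrics above is assumed to be sklearn.metrics imported under that alias elsewhere in the module, and the linear model here is just a stand-in estimator.

import numpy as np
from sklearn import metrics as skmetrics
from sklearn.linear_model import LinearRegression

X = np.random.rand(80, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * np.random.rand(80)
model = LinearRegression().fit(X[:60], y[:60])
print(calculate_regression_metrics(model, X[60:], y[60:]))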
_method.py (project: q2-diversity, author: qiime2)
def beta(table: biom.Table, metric: str, n_jobs: int = 1) -> skbio.DistanceMatrix:
    if metric not in non_phylogenetic_metrics():
        raise ValueError("Unknown metric: %s" % metric)
    if table.is_empty():
        raise ValueError("The provided table object is empty")

    counts = table.matrix_data.toarray().astype(int).T
    sample_ids = table.ids(axis='sample')

    return skbio.diversity.beta_diversity(
        metric=metric,
        counts=counts,
        ids=sample_ids,
        pairwise_func=sklearn.metrics.pairwise_distances,
        n_jobs=n_jobs
    )
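The heavy lifting is skbio.diversity.beta_diversity; here is a standalone sketch with a plain counts matrix, assuming scikit-bio is installed ('braycurtis' is one of the non-phylogenetic metrics):

import skbio.diversity

counts = [[1, 5, 0],
          [0, 2, 3],
          [4, 0, 1]]
dm = skbio.diversity.beta_diversity('braycurtis', counts, ids=['S1', 'S2', 'S3'])
print(dm)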
components.py (project: sptgraph, author: epfl-lts2)
def best_shape_clustering(mols, nb_layers, k_range=range(3, 20), train_ratio=0.8, cluster_key='shape_cid'):
    # train_test_split moved to sklearn.model_selection in scikit-learn 0.18+
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import silhouette_score

    shape_df = mols['dynamic'].apply(lambda x: temporal_shape(x, nb_layers))
    train_idx, test_idx = train_test_split(shape_df.index.values, train_size=train_ratio)

    train_mat = np.array(list(shape_df[shape_df.index.isin(train_idx)].values))
    full_mat = np.array(list(shape_df.values))

    centroids = None
    labels = None
    best_score = 0
    for k in k_range:
        res = cluster_shapes(train_mat, full_mat, k)
        score = silhouette_score(full_mat, res[1])
        if score > best_score:
            centroids = res[0]
            labels = res[1]
            best_score = score

    mols[cluster_key] = labels
    return mols, centroids
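The same select-k-by-silhouette loop, reduced to a self-contained sketch with KMeans standing in for cluster_shapes (an assumption; the real helper is not shown here):

import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

X = np.random.rand(50, 4)
best_k, best_score = None, -1.0
for k in range(3, 8):
    labels = KMeans(n_clusters=k, n_init=10).fit_predict(X)
    score = silhouette_score(X, labels)
    if score > best_score:
        best_k, best_score = k, score
print(best_k, best_score)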
model.py (project: FeatureHub, author: HDI-Project)
def compute_metrics_cv(self, X, Y):
        """Compute cross-validated metrics.

        Trains this model on data X with labels Y.

        Returns a MetricList with the name, scoring type, and value for each
        Metric. Note that these values may be numpy floating points, and should
        be converted prior to insertion in a database.

        Parameters
        ----------
        X : numpy array-like or pd.DataFrame
            data
        Y : numpy array-like or pd.DataFrame or pd.Series
            labels
        """

        scorings, scorings_ = self._get_scorings()

        # compute scores
        scores = self.cv_score_mean(X, Y, scorings_)

        # unpack into MetricList
        metric_list = self.scores_to_metriclist(scorings, scores)
        return metric_list
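cv_score_mean is not shown; presumably it wraps cross-validated scoring with sklearn's named scorers, along these lines (a sketch under that assumption):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

X = np.random.rand(60, 4)
y = (X[:, 0] > 0.5).astype(int)
mean_accuracy = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=3).mean()
print(mean_accuracy)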
model.py (project: FeatureHub, author: HDI-Project)
def compute_metrics_train_test(self, X, Y, n):
        """Compute metrics on test set.
        """

        X, Y = self._format_matrices(X, Y)

        X_train, Y_train = X[:n], Y[:n]
        X_test, Y_test = X[n:], Y[n:]

        scorings, scorings_ = self._get_scorings()

        # Determine binary/multiclass classification
        classes = np.unique(Y)
        params = self._get_params(classes)

        # fit model on entire training set
        self.model.fit(X_train, Y_train)

        scores = {}
        for scoring in scorings_:
            scores[scoring] = self._do_scoring(scoring, params, self.model,
                    X_test, Y_test)

        metric_list = self.scores_to_metriclist(scorings, scores)
        return metric_list
agents.py (project: deeppavlov, author: deepmipt)
def observe(self, observation):
        """Process observation for metrics."""
        if self.lastY is not None:
            self.metrics.update(observation, self.lastY)
            if 'text' in observation.keys():
                self.labels += self._text2predictions(self.lastY)
                self.observations += [observation['score']]
            self.lastY = None
        return observation
agents.py (project: deeppavlov, author: deepmipt)
def reset_metrics(self):
        """Reset metrics, observations and labels."""
        super().reset_metrics()
        del self.observations[:]
        del self.labels[:]
agents.py (project: deeppavlov, author: deepmipt)
def report(self):
        """Return report with metrics on the whole data."""
        loss = sklearn.metrics.log_loss(self.labels, self.observations)
        acc = sklearn.metrics.accuracy_score(self.labels,
                                             self._text2predictions(self._predictions2text(self.observations)))
        try:
            auc = sklearn.metrics.roc_auc_score(self.labels, self.observations)
        except ValueError:
            auc = 0
        report = dict()
        report['comments'] = len(self.observations)
        report['loss'] = loss
        report['accuracy'] = acc
        report['auc'] = auc
        return report
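The same three metrics, isolated on toy data (binary labels and probability scores; the agent's _text2predictions/_predictions2text round-trip is omitted):

import sklearn.metrics

labels = [0, 1, 1, 0, 1]
scores = [0.2, 0.8, 0.6, 0.4, 0.9]
print(sklearn.metrics.log_loss(labels, scores))
print(sklearn.metrics.accuracy_score(labels, [int(s > 0.5) for s in scores]))
print(sklearn.metrics.roc_auc_score(labels, scores))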
_method.py (project: q2-diversity, author: qiime2)
def beta_phylogenetic(table: biom.Table, phylogeny: skbio.TreeNode,
                      metric: str, n_jobs: int = 1) -> skbio.DistanceMatrix:
    if metric not in phylogenetic_metrics():
        raise ValueError("Unknown phylogenetic metric: %s" % metric)
    if table.is_empty():
        raise ValueError("The provided table object is empty")
    if n_jobs != 1 and metric == 'weighted_unifrac':
        raise ValueError("Weighted UniFrac is not parallelizable")

    counts = table.matrix_data.toarray().astype(int).T
    sample_ids = table.ids(axis='sample')
    feature_ids = table.ids(axis='observation')

    try:
        results = skbio.diversity.beta_diversity(
            metric=metric,
            counts=counts,
            ids=sample_ids,
            otu_ids=feature_ids,
            tree=phylogeny,
            pairwise_func=sklearn.metrics.pairwise_distances,
            n_jobs=n_jobs
        )
    except skbio.tree.MissingNodeError as e:
        message = str(e).replace('otu_ids', 'feature_ids')
        message = message.replace('tree', 'phylogeny')
        raise skbio.tree.MissingNodeError(message)

    return results
_method.py (project: q2-diversity, author: qiime2)
def beta_phylogenetic_alt(table: BIOMV210Format, phylogeny: NewickFormat,
                          metric: str, n_jobs: int = 1,
                          variance_adjusted: bool = False,
                          alpha: float = None,
                          bypass_tips: bool = False) -> skbio.DistanceMatrix:

    metrics = phylogenetic_metrics_alt_dict()
    generalized_unifrac = 'generalized_unifrac'

    if metric not in metrics:
        raise ValueError("Unknown metric: %s" % metric)

    if alpha is not None and metric != generalized_unifrac:
        raise ValueError('The alpha parameter is only allowed when the choice'
                         ' of metric is generalized_unifrac')

    # this behaviour is undefined, so let's avoid a seg fault
    cpus = psutil.cpu_count(logical=False)
    if n_jobs > cpus:
        raise ValueError('The value of n_jobs cannot exceed the number of '
                         'processors (%d) available in this system.' % cpus)

    if metric == generalized_unifrac:
        alpha = 1.0 if alpha is None else alpha
        f = partial(metrics[metric], alpha=alpha)
    else:
        f = metrics[metric]

    # unifrac expects file paths, so the table and tree are passed as filenames
    return f(str(table), str(phylogeny), threads=n_jobs,
             variance_adjusted=variance_adjusted, bypass_tips=bypass_tips)
mongo_enabled_learning.py (project: model_sweeper, author: akimovmike)
def compute_metrics(metric, learn_data, model_data):
    target_label_ground_truth = load_df_from_sample_notation(model_data['Feature Sample Location'])[model_data['Target Variable']]
    prediction = learn_data['Prediction']
    if metric == 'AUC':
        # stub: should be sk.metrics.auc_mathafaka(target_label_ground_truth, prediction)
        return 0.9
mongo_enabled_learning.py (project: model_sweeper, author: akimovmike)
def sw_evalute_model(learn_data, overwrite_existing, worker_id=None):
#     learn_data = db['learns'].find_one(learn_id)
    model_data = db[learn_data['Model'][-1]].find_one(learn_data['Model'][0])
    if learn_data['Status']['Prediction Computed']:
        for metric in learn_data['Evaluation Results'].keys():
            if learn_data['Evaluation Results'][metric] is None or overwrite_existing:
                learn_data['Evaluation Results'][metric] = compute_metrics(metric, learn_data, model_data)

        learn_data['Status']['Model Evaluated'] = True
        db['learns'].update(learn_data['_id'], learn_data)
classifier.py (project: brainiak, author: brainiak)
def score(self, X, y, sample_weight=None):
        """Returns the mean accuracy on the given test data and labels.

        NOTE: In the condition of sklearn.svm.SVC with precomputed kernel
        when the kernel matrix is computed portion by portion, the function
        will ignore the first input argument X.

        Parameters
        ----------
        X: list of tuple (data1, data2)
            data1 and data2 are numpy array in shape [num_TRs, num_voxels]
            to be computed for correlation.
            They are test samples.
            They contain the activity data filtered by ROIs
            and prepared for correlation computation.
            Within list, all data1s must have the same num_voxels value,
            all data2s must have the same num_voxels value.
            len(X) is the number of test samples.

        y: 1D numpy array
            labels, len(X) equals len(y), which is num_samples
        sample_weight: 1D array in shape [num_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) wrt. y.
        """
        from sklearn.metrics import accuracy_score
        if isinstance(self.clf, sklearn.svm.SVC) \
                and self.clf.kernel == 'precomputed' \
                and self.training_data_ is None:
            result = accuracy_score(y, self.predict(),
                                    sample_weight=sample_weight)
        else:
            result = accuracy_score(y, self.predict(X),
                                    sample_weight=sample_weight)
        return result
seizure_modeling.py (project: kaggle-seizure-prediction, author: sics-lm)
def get_report(clf, test_data_x, test_data_y):
    """
    Returns a string with a report of how the classifier *clf* does on the test data.

    :param clf: The classifier to use for calculating the scores.
    :param test_data_x: The test data observations to use for predictions.
    :param test_data_y: The test data class label to use.
    :return: A string containing a report on the performance of the classifier comparing the predicted class labels
             versus the true.
    """
    test_data_y_pred = predict(clf, test_data_x, probabilities=False)

    report_lines = [
        "Classification report:",
        "Best parameters set found on development set:",
        "",
        str(clf.best_estimator_),
        "",
        grid_scores(clf),
        "Detailed classification report:",
        ""
        "The model is trained on the full development set.",
        "The scores are computed on the full evaluation set.",
        "",
        sklearn.metrics.classification_report(test_data_y, test_data_y_pred),
        "",
        cm_report(sklearn.metrics.confusion_matrix(test_data_y, test_data_y_pred),
                  labels=['Interictal', 'Preictal']),
        "",
    ]
    report = '\n'.join(report_lines)
    return report
vwoptimize.py (project: vwoptimize, author: denik)
def root_mean_squared_error(*args, **kwargs):
    import math
    import sklearn.metrics
    return math.sqrt(sklearn.metrics.mean_squared_error(*args, **kwargs))
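Usage sketch: RMSE is simply the square root of sklearn's mean squared error.

print(root_mean_squared_error([3.0, 5.0], [2.5, 5.5]))  # 0.5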
vwoptimize.py (project: vwoptimize, author: denik)
def extract_score(metric, outputs):
    if not outputs:
        raise ValueError('error: No output captured from vw')

    orig_outputs = outputs

    stage, metric = _parse_vw_metric(metric)
    outputs = (outputs or {}).get(stage)

    if not outputs:
        raise ValueError('error: No output for stage %r. Available: %r' % (stage, ', '.join(orig_outputs.keys())))

    values = [x.get(metric) for x in outputs]

    for item in values:
        if item is None:
            raise ValueError('Metric (%s)%s not found. Available metrics: %s' % (stage, metric, outputs[0].keys()))

    try:
        values = [float(x) for x in values]
    except Exception:
        if values[0].endswith(' h'):
            return values
        return None

    return values
vwoptimize.py (project: vwoptimize, author: denik)
def recall_at_precision(*args, **kwargs):
    from sklearn.metrics import precision_recall_curve
    metric_param = kwargs.pop('metric_param')
    required_precision = _parse_number_or_fraction(metric_param)
    precision, recall, thresholds = precision_recall_curve(*args, **kwargs)

    # izip in the original is Python 2's lazy zip; plain zip works in Python 3
    for pr, r in zip(precision, recall):
        if pr >= required_precision:
            return r
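A usage sketch, assuming _parse_number_or_fraction (defined elsewhere in this module) accepts a plain decimal string:

y_true = [0, 0, 1, 1]
scores = [0.1, 0.4, 0.35, 0.8]
# recall at the first point where precision reaches 0.8
print(recall_at_precision(y_true, scores, metric_param='0.8'))  # 0.5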
vwoptimize.py (project: vwoptimize, author: denik)
def log_report_one(prefix, metrics, y_true, y_pred, sample_weight, config, classification_report, outputs=None, mask=None):

    if mask is not None:
        y_true = np.ma.MaskedArray(y_true, mask=mask).compressed()
        y_pred = np.ma.MaskedArray(y_pred, mask=mask).compressed()
        sample_weight = np.ma.MaskedArray(sample_weight, mask=mask).compressed() if sample_weight is not None else None
        assert y_true.shape == y_pred.shape, (y_true.shape, y_pred.shape)

    for metric in metrics:
        log_always('%s%s = %s', prefix, metric, _frmt_score(calculate_or_extract_score(metric, y_true, y_pred, config, outputs=outputs, sample_weight=sample_weight)))

    if classification_report:
        assert y_true is not None
        assert y_pred is not None
        log_classification_report(prefix, y_true, y_pred, labels=config.get('named_labels'), threshold=config.get('threshold'))  # XXX sample_weight
evaluation.py (project: complex, author: ttrouill)
def __init__(self, preds, true_vals, ranks, raw_ranks):
        self.preds = preds
        self.ranks = ranks
        self.true_vals = true_vals
        self.raw_ranks = raw_ranks

        # Test that the predictions are not all identical; this sometimes happens
        # with overfitting and leads scikit-learn to output an incorrect average
        # precision (i.e. ap=1)
        if not (preds == preds[0]).all():
            # Due to the use of np.isclose in sklearn.metrics.ranking._binary_clf_curve
            # (called by the metrics functions below), the predictions have to be
            # rescaled if they are too close together:
            preds_rescaled = preds

            diffs = np.diff(np.sort(preds))
            min_diff = min(abs(diffs[np.nonzero(diffs)]))
            if min_diff < 1e-8:  # default absolute tolerance of np.isclose
                preds_rescaled = (preds * (1e-7 / min_diff)).astype('d')

            self.ap = sklearn.metrics.average_precision_score(true_vals, preds_rescaled)
            self.precision, self.recall, self.thresholds = sklearn.metrics.precision_recall_curve(true_vals, preds_rescaled)
        else:
            logger.warning("All prediction scores are equal, probable overfitting, replacing scores by random scores")
            self.ap = (true_vals == 1).sum() / float(len(true_vals))
            self.thresholds = preds[0]
            self.precision = (true_vals == 1).sum() / float(len(true_vals))
            self.recall = 0.5


        self.mrr = -1
        self.raw_mrr = -1

        if ranks is not None:
            self.mrr = np.mean(1.0 / ranks)
            self.raw_mrr = np.mean(1.0 / raw_ranks)
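The rescaling guard above, isolated: if adjacent sorted scores differ by less than np.isclose's default absolute tolerance (1e-8), scale the whole vector up before computing average precision.

import numpy as np

preds = np.array([1e-9, 2e-9, 3e-9])
diffs = np.diff(np.sort(preds))
min_diff = np.abs(diffs[np.nonzero(diffs)]).min()
if min_diff < 1e-8:
    preds = (preds * (1e-7 / min_diff)).astype('d')
print(preds)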
detection.py (project: FeatureSqueezing, author: QData)
def train_detector(x_train, y_train, x_val, y_val):
    fpr, tpr, thresholds = roc_curve(y_train, x_train)
    accuracy = [ sklearn.metrics.accuracy_score(y_train, x_train>threshold, normalize=True, sample_weight=None) for threshold in thresholds ]
    roc_auc = auc(fpr, tpr)

    idx_best = np.argmax(accuracy)
    best_threshold = thresholds[idx_best]
    print "Best training accuracy: %.4f, TPR(Recall): %.4f, FPR: %.4f @%.4f" % (accuracy[idx_best], tpr[idx_best], fpr[idx_best], thresholds[idx_best])
    print "ROC_AUC: %.4f" % roc_auc

    accuracy_val = [ sklearn.metrics.accuracy_score(y_val, x_val>threshold, normalize=True, sample_weight=None) for threshold in thresholds ]
    tpr_val, fpr_val = zip(*[ get_tpr_fpr(y_val, x_val, threshold)  for threshold in thresholds  ])
    # roc_auc_val = auc(fpr_val, tpr_val)
    print "Validation accuracy: %.4f, TPR(Recall): %.4f, FPR: %.4f @%.4f" % (accuracy_val[idx_best], tpr_val[idx_best], fpr_val[idx_best], thresholds[idx_best])

    return threshold, accuracy_val, fpr_val, tpr_val
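The threshold-selection idea in isolation, in Python 3 syntax on toy scores (the snippet above is Python 2):

import numpy as np
from sklearn.metrics import accuracy_score, roc_curve

y = np.array([0, 0, 1, 1])
s = np.array([0.1, 0.4, 0.35, 0.8])
fpr, tpr, thresholds = roc_curve(y, s)
accuracy = [accuracy_score(y, s > t) for t in thresholds]
idx_best = int(np.argmax(accuracy))
print(thresholds[idx_best], accuracy[idx_best])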
utils.py (project: amle, author: elibol)
def roc_auc_score(y_truth, y_pred, num_classes=None):
    return sklearn.metrics.roc_auc_score(*map(partial(to_matrix, num_classes=num_classes), [y_truth, y_pred]))


#########################
# AUTOSKLEARN UTILS
#########################
ScikitLearners.py (project: Aion, author: aleisalem)
def calculateMetrics(truth, predicted):
    """
    Calculates and returns a set of metrics from ground truth and predicted vectors
    :param truth: A list of ground truth labels
    :type truth: list
    :param predicted: A list of predicted labels
    :type predicted: list
    :return: A dict of metrics including accuracy, recall, specificity, precision, and F1-score
    """
    try:
        # Sanity check
        if not len(truth) == len(predicted):
            prettyPrint("The two vectors have different dimensionality", "warning")
            return {}

        metrics = {}
        # Calculate the different metrics
        metrics["accuracy"] = accuracy_score(truth, predicted)
        metrics["recall"] = recall_score(truth, predicted)
        metrics["specificity"] = specificity_score(truth, predicted) # From Aion.utils.misc
        metrics["precision"] = precision_score(truth, predicted)
        metrics["f1score"] = f1_score(truth, predicted)

    except Exception as e:
        prettyPrintError(e)
        return {}

    return metrics
model_eval.py (project: healthcareai-py, author: HealthCatalyst)
def calculate_binary_classification_metrics(trained_sklearn_estimator, x_test, y_test):
    """
    Given a trained estimator, calculate metrics.

    Args:
        trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
        x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)
        y_test (numpy.ndarray): A 1d numpy array of the y_test set (ground-truth labels)

    Returns:
        dict: A dictionary of metrics objects
    """
    # Squeeze down y_test to 1D
    y_test = np.squeeze(y_test)

    _validate_predictions_and_labels_are_equal_length(x_test, y_test)

    # Get binary and probability classification predictions
    binary_predictions = np.squeeze(trained_sklearn_estimator.predict(x_test))
    probability_predictions = np.squeeze(trained_sklearn_estimator.predict_proba(x_test)[:, 1])

    # Calculate accuracy
    accuracy = skmetrics.accuracy_score(y_test, binary_predictions)
    roc = compute_roc(y_test, probability_predictions)
    pr = compute_pr(y_test, probability_predictions)

    # Unpack the roc and pr dictionaries so the metric lookup is easier for plot and ensemble methods
    return {'accuracy': accuracy, **roc, **pr}
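The accuracy/probability split above, with compute_roc and compute_pr (module helpers not shown here) replaced by direct sklearn calls on a synthetic dataset:

import numpy as np
from sklearn import metrics as skmetrics
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = rng.rand(100, 3)
y = (X[:, 0] > 0.5).astype(int)
clf = LogisticRegression().fit(X, y)
print(skmetrics.accuracy_score(y, clf.predict(X)))
print(skmetrics.roc_auc_score(y, clf.predict_proba(X)[:, 1]))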
model.py (project: FeatureHub, author: HDI-Project)
def _get_scorings(self):
        """Get scorings for this problem type.

        Returns
        -------
        scorings : list of dict
            Information on metric name and associated "scoring" as defined in
            sklearn.metrics
        scorings_ : list
            List of "scoring" as defined in sklearn.metrics. This is a "utility
            variable" that can be used where we just need the names of the
            scoring functions and not the more complete information.
        """
        # scoring_types maps user-readable name to `scoring`, as argument to
        # cross_val_score
        # See also http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
        if self._is_classification():
            scorings = Model.CLASSIFICATION_SCORING
            scorings_ = [s["scoring"] for s in scorings]
        elif self._is_regression():
            scorings = Model.REGRESSION_SCORING
            scorings_ = [s["scoring"] for s in scorings]
        else:
            raise NotImplementedError

        return scorings, scorings_
base.py (project: EvadeML-Zoo, author: mzweilin)
def evalulate_detection_test(Y_detect_test, Y_detect_pred):
    accuracy = sklearn.metrics.accuracy_score(Y_detect_test, Y_detect_pred, normalize=True, sample_weight=None)
    tpr, fpr, tp, ap = get_tpr_fpr(Y_detect_test, Y_detect_pred)
    return accuracy, tpr, fpr, tp, ap
train_tclstm.py (project: tdlstm, author: bluemonk482)
def eval(self, session, feed, saver, early_stopping_rounds, early_stopping_metric_list, early_stopping_metric_minimize=False, metrics='accuracy'):
        test_loss_value, acc_test, pred = session.run(self.test_loss, feed)
        f1_3class, f1_2class = fscores(self.data.dev_y, pred)
        if not self.tuning:
            print("*** Validation Loss = {:.6f}; Validation Accuracy = {:.5f}; 3-class F1 = {:.5f}; 2-class F1 = {:.5f}"
                        .format(test_loss_value, acc_test, f1_3class, f1_2class))
            print()
        early_stop = False
        early_stopping_score = -1
        if metrics == 'accuracy':
            early_stopping_score = acc_test
            early_stopping_metric_list.append(acc_test)
        elif metrics == '3classf1':
            early_stopping_score = f1_3class
            early_stopping_metric_list.append(f1_3class)
        elif metrics == '2classf1':
            early_stopping_score = f1_2class
            early_stopping_metric_list.append(f1_2class)
        assert early_stopping_score > 0

        if (not self.FLAGS.restore) and (early_stopping_metric_minimize): # For minimising the eval score
            if all(early_stopping_score <= i for i in early_stopping_metric_list):
                saver.save(session, self.FLAGS.checkpoint_file)
                # best_eval_score = (acc_test, f1_3class, f1_2class)
            if early_stopping_metric_list[::-1].index(min(early_stopping_metric_list)) > early_stopping_rounds:
                early_stop = True
            return (test_loss_value, (acc_test, f1_3class, f1_2class), early_stop)
        elif not (self.FLAGS.restore and early_stopping_metric_minimize):  # For maximising the eval score
            if all(early_stopping_score >= i for i in early_stopping_metric_list):
                saver.save(session, self.FLAGS.checkpoint_file)
                # best_eval_score = (acc_test, f1_3class, f1_2class)
            if early_stopping_metric_list[::-1].index(max(early_stopping_metric_list)) > early_stopping_rounds:
                early_stop = True
            return (test_loss_value, (acc_test, f1_3class, f1_2class), early_stop)
train_tdlstm.py (project: tdlstm, author: bluemonk482)
def eval(self, session, feed, saver, early_stopping_rounds, early_stopping_metric_list, early_stopping_metric_minimize=False, metrics='accuracy'):
        test_loss_value, acc_test, pred = session.run(self.test_loss, feed)
        f1_3class, f1_2class = fscores(self.data.dev_y, pred)
        if not self.tuning:
            print("*** Validation Loss = {:.6f}; Validation Accuracy = {:.5f}; 3-class F1 = {:.5f}; 2-class F1 = {:.5f}"
                        .format(test_loss_value, acc_test, f1_3class, f1_2class))
            print()
        early_stop = False
        early_stopping_score = -1
        if metrics == 'accuracy':
            early_stopping_score = acc_test
            early_stopping_metric_list.append(acc_test)
        elif metrics == '3classf1':
            early_stopping_score = f1_3class
            early_stopping_metric_list.append(f1_3class)
        elif metrics == '2classf1':
            early_stopping_score = f1_2class
            early_stopping_metric_list.append(f1_2class)
        assert early_stopping_score > 0

        if (not self.FLAGS.restore) and (early_stopping_metric_minimize): # For minimising the eval score
            if all(early_stopping_score <= i for i in early_stopping_metric_list):
                saver.save(session, self.FLAGS.checkpoint_file)
                best_eval_score = (acc_test, f1_3class, f1_2class)
            if early_stopping_metric_list[::-1].index(min(early_stopping_metric_list)) > early_stopping_rounds:
                early_stop = True
            return (test_loss_value, (acc_test, f1_3class, f1_2class), early_stop)
        elif not (self.FLAGS.restore and early_stopping_metric_minimize):  # For maximising the eval score
            if all(early_stopping_score >= i for i in early_stopping_metric_list):
                saver.save(session, self.FLAGS.checkpoint_file)
                best_eval_score = (acc_test, f1_3class, f1_2class)
            if early_stopping_metric_list[::-1].index(max(early_stopping_metric_list)) > early_stopping_rounds:
                early_stop = True
            return (test_loss_value, (acc_test, f1_3class, f1_2class), early_stop)
train_lstm.py (project: tdlstm, author: bluemonk482)
def eval(self, session, feed, saver, early_stopping_rounds, early_stopping_metric_list, early_stopping_metric_minimize=False, metrics='accuracy'):
        test_loss_value, acc_test, pred, eval_summary = session.run(self.test_loss, feed)
        f1_3class, f1_2class = fscores(self.data.dev_y, pred)
        if not self.tuning:
            print("*** Validation Loss = {:.6f}; Validation Accuracy = {:.5f}; 3-class F1 = {:.5f}; 2-class F1 = {:.5f}"
                        .format(test_loss_value, acc_test, f1_3class, f1_2class))
            print()
        early_stop = False
        early_stopping_score = -1
        if metrics == 'accuracy':
            early_stopping_score = acc_test
            early_stopping_metric_list.append(acc_test)
        elif metrics == '3classf1':
            early_stopping_score = f1_3class
            early_stopping_metric_list.append(f1_3class)
        elif metrics == '2classf1':
            early_stopping_score = f1_2class
            early_stopping_metric_list.append(f1_2class)
        assert early_stopping_score > 0

        if (not self.FLAGS.restore) and (early_stopping_metric_minimize): # For minimising the eval score
            if all(early_stopping_score <= i for i in early_stopping_metric_list):
                saver.save(session, self.FLAGS.checkpoint_file)
                best_eval_score = (acc_test, f1_3class, f1_2class)
            if early_stopping_metric_list[::-1].index(min(early_stopping_metric_list)) > early_stopping_rounds:
                early_stop = True
            return (test_loss_value, (acc_test, f1_3class, f1_2class), early_stop)
        elif not (self.FLAGS.restore and early_stopping_metric_minimize):  # For maximising the eval score
            if all(early_stopping_score >= i for i in early_stopping_metric_list):
                saver.save(session, self.FLAGS.checkpoint_file)
                best_eval_score = (acc_test, f1_3class, f1_2class)
            if early_stopping_metric_list[::-1].index(max(early_stopping_metric_list)) > early_stopping_rounds:
                early_stop = True
            return (test_loss_value, (acc_test, f1_3class, f1_2class), early_stop, eval_summary)
KerasWrapper.py (project: audit-log-detection, author: twosixlabs)
def compile(self):

        self.model_.compile(optimizer=self.optimizer, loss=self.loss, metrics=None)

