Python itemgetter() example source code
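
operator.itemgetter(*items) builds a callable that looks up the given index or key (or several at once) on whatever it is called with, which makes it a convenient key= function for sorted(), list.sort(), min() and max(). As a minimal standalone sketch (not taken from any of the projects below), the two patterns that dominate the snippets on this page look like this:

from operator import itemgetter

scores = {"ann": 3, "bob": 1, "eve": 2}
# sort (key, value) pairs by the value, highest first
ranking = sorted(scores.items(), key=itemgetter(1), reverse=True)
# -> [('ann', 3), ('eve', 2), ('bob', 1)]

rows = [{"journal": "Nature", "year": 2001}, {"journal": "Cell", "year": 1999}]
# itemgetter also accepts dict keys, and several keys at once
rows.sort(key=itemgetter("journal"))
by_year_then_journal = sorted(rows, key=itemgetter("year", "journal"))

The snippets below are collected from open-source projects; each heading names the file, project and author it was taken from.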

in_db.py (project: bibcure, author: bibcure)
def update_bibs_in(grouped_bibs, db_abbrev):
    actions = {
        "y": lambda items: [update_in(bibs, db_abbrev) for bibs in items],
        "m": lambda items: [manual_update_in(bibs, db_abbrev) for bibs in items],
        "n": lambda items: items
    }
    print("\n ")
    action = input("Abbreviate everything? " +
                   "y(yes, automatic)/m(manual)/n(do nothing)")
    grouped_bibs.sort(key=operator.itemgetter('journal'))
    grouped_by_journal = []
    for key, items in groupby(grouped_bibs, lambda i: i["journal"]):
        grouped_by_journal.append(list(items))

    if action in ("y", "m", "n"):
        updated_bibs = actions.get(action)(grouped_by_journal)
    else:
        return update_bibs_in(grouped_bibs, db_abbrev)

    updated_bibs = reduce(lambda a, b: a+b, updated_bibs)
    return updated_bibs
WLE_stats_generation.py (project: WikiLoves, author: wmes)
def create_pie_chart (input_dict, input_colors, suffix, special_item_key=None) :
    if special_item_key != None :
        special_item = dict()
        special_item[special_item_key] = 0

    output_text = u'{{#invoke:Chart|pie chart\n' \
                  u'| radius = 180\n' \
                  u'| slices = \n'
    input_dict = dict(input_dict)
    sorted_dict = OrderedDict(sorted(input_dict.items(), key=itemgetter(1), reverse=True))
    for key, value in sorted_dict.iteritems() :
        if special_item_key == None or key != special_item_key :
            output_text += u'    ( %d: %s : %s)\n' %(value, key, input_colors[key])
        else :
            special_item[special_item_key] = value

    if special_item_key != None :
        output_text += u'    ( %d: %s : %s)\n' % (special_item[special_item_key], special_item_key, input_colors[special_item_key])

    output_text += u'| units suffix = _%s\n' \
                   u'| percent = true\n' \
                   u'}}\n' %(suffix)
    return output_text
face_tracking.py (project: Resnet-Emotion-Recognition, author: safreita1)
def smooth_emotions(self, prediction):
        emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
        emotion_values = {'Angry': 0.0, 'Disgust': 0.0, 'Fear': 0.0, 'Happy': 0.0, 'Sad': 0.0, 'Surprise': 0.0, 'Neutral': 0.0}

        emotion_probability, emotion_index = max((val, idx) for (idx, val) in enumerate(prediction[0]))
        emotion = emotions[emotion_index]

        # Append the new emotion and if the max length is reached pop the oldest value out
        self.emotion_queue.appendleft((emotion_probability, emotion))

        # Iterate through each emotion in the queue and create an average of the emotions
        for pair in self.emotion_queue:
            emotion_values[pair[1]] += pair[0]

        # Select the current emotion based on the one that has the highest value
        average_emotion = max(emotion_values.iteritems(), key=operator.itemgetter(1))[0]

        return average_emotion
pyclbr.py (project: kinect-2-libras, author: inessadl)
def _main():
    # Main program for testing.
    import os
    mod = sys.argv[1]
    if os.path.exists(mod):
        path = [os.path.dirname(mod)]
        mod = os.path.basename(mod)
        if mod.lower().endswith(".py"):
            mod = mod[:-3]
    else:
        path = []
    dict = readmodule_ex(mod, path)
    objs = dict.values()
    objs.sort(lambda a, b: cmp(getattr(a, 'lineno', 0),
                               getattr(b, 'lineno', 0)))
    for obj in objs:
        if isinstance(obj, Class):
            print "class", obj.name, obj.super, obj.lineno
            methods = sorted(obj.methods.iteritems(), key=itemgetter(1))
            for name, lineno in methods:
                if name != "__path__":
                    print "  def", name, lineno
        elif isinstance(obj, Function):
            print "def", obj.name, obj.lineno
runRIPETraceroute.py (project: netra, author: akshah)
def selectProbes(prList):
    retIDList=[]
    distances=[]
    if len(prList) ==1:
        id,lat,lon=prList[0]
        retIDList.append(id)
    else:
        for iter in range(0,len(prList)-1):
            id,lat,lon=prList[iter]
            for iter2 in range(iter+1,len(prList)):
                id2,lat2,lon2=prList[iter2]
                dist=haversine(lon,lat,lon2,lat2)
                distances.append([id,id2,dist])
            #retIDList.append(id)
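        # sort the candidate pairs by haversine distance, largest first, so the two probes farthest apart are picked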
        sortedDistances=sorted(distances, key=itemgetter(2),reverse=True)
        ID1,ID2,dist=sortedDistances[0]#Top one
        retIDList=[ID1,ID2]
    return retIDList
variable_mgr_util.py (project: benchmarks, author: tensorflow)
def __call__(self, getter, *args, **kwargs):
    size = tf.TensorShape(kwargs['shape']).num_elements()
    if size < self.small_variable_size_threshold:
      device_name = self.device_for_small_variables
    else:
      device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
      device_name = self.devices[device_index]
      self.sizes[device_index] += size

    kwargs['caching_device'] = device_name
    var = getter(*args, **kwargs)
    return var


# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in LOCAL_VARIABLES and not GLOBAL_VARIBLES collection.
main.py (project: benchmarks, author: tensorflow)
def index(pattern=None):
  """Renders index.html page with a list of benchmarks."""
  filter_regex = None
  if pattern:
    filter_regex = re.compile(urllib.parse.unquote(pattern))
  min_time_to_lookup = datetime.now() - timedelta(days=_MAX_DAYS_WITHOUT_RUN)

  client = datastore.Client()
  query = client.query(kind='Test')
  query.add_filter('start', '>', min_time_to_lookup)

  fetched = list(query.fetch())
  test_names = {}  # maps test name to encoded test name
  for fetched_result in fetched:
    if fetched_result['test'] in test_names:
      continue  # already added
    if not filter_regex or re.search(pattern, fetched_result['test']):
      test_names[fetched_result['test']] = urllib.parse.quote(
          fetched_result['test'], safe='')

  # convert test_names to list and sort
  test_names = sorted(test_names.items(), key=itemgetter(1), reverse=True)

  return render_template('index.html', tests=test_names)
Cluster.py (project: rebuild_obfuscator, author: irobert-tluo)
def cluster(self, data_set, threshold, verbose=True):
        prev_map = {}
        grp_id = 0
        for index in range(len(data_set)):
            sample = data_set[index]
            if not verbose:
                print "[+] Processing Sample:", sample["id"]
            scores = {prev["id"] : Util.simscore(sample["encoded"], prev["encoded"]) for prev in data_set[:index]}
            if len(scores) > 0 and max(scores.values()) > threshold:
                closest = max(scores.iteritems(), key=operator.itemgetter(1))[0]
                if not verbose:
                    print "[+] Found Closest Cluster:", closest
                cur_grp_id = prev_map[closest]
            else:
                grp_id += 1
                cur_grp_id = grp_id
            prev_map[sample["id"]] = cur_grp_id
        grp_info = {}
        for sid, gid in prev_map.iteritems():
            if gid not in grp_info:
                grp_info[gid] = []
            grp_info[gid].append(sid)
        return grp_info
Cluster.py (project: rebuild_obfuscator, author: irobert-tluo)
def cluster(self, data_set, threshold, verbose=True):
        grp_map = {}
        grp_id = 0
        for index in range(len(data_set)):
            sample = data_set[index]
            if not verbose:
                print "[+] Processing Sample:", sample["id"]
            scores = {}
            for prev_grp_id, prev_grp_data in grp_map.iteritems():
                scores[prev_grp_id] = min([Util.simscore(sample["encoded"], prev["encoded"]) for prev in prev_grp_data])
            if len(scores) == 0 or max(scores.values()) < threshold:
                grp_id += 1
                cur_grp_id = grp_id
                grp_map[cur_grp_id] = []
            else:
                cur_grp_id = max(scores.iteritems(), key=operator.itemgetter(1))[0]
            if not verbose:
                print "[+] Found Closet Cluster:", cur_grp_id
            grp_map[cur_grp_id].append(sample)
        grp_info = {}
        for prev_grp_id, prev_grp_data in grp_map.iteritems():
            grp_info[prev_grp_id] = [prev["id"] for prev in prev_grp_data]
        return grp_info
groupby.py (project: dask_gdf, author: gpuopenanalytics)
def _do_grouping(self):
        """Group the dataframe
        """
        # First, do groupby on the first key by sorting on the first key.
        # This will sort & shuffle the partitions.
        firstkey = self._by[0]
        df = self._df.sort_value(firstkey)
        groups = df.to_delayed()
        # Second, do groupby internally for each partition.
        @delayed
        def _groupby(df, by):
            grouped = df.groupby(by=by)
            ovdata = _extract_data_to_check_group_overlap(grouped, by)
            return grouped, ovdata

        grouped = [_groupby(g, self._by) for g in groups]
        # Persist the groupby operation to avoid duplicating the work
        grouped = persist(*grouped)
        # Get the groupby objects
        outgroups = list(map(delayed(operator.itemgetter(0)), grouped))
        _check_group_non_overlap_assumption(grouped)
        return outgroups
heist.py (project: Jumper-Cogs, author: Redjumpman)
def _targets_heist(self, ctx):
        """Shows a list of targets"""
        server = ctx.message.server
        settings = self.check_server_settings(server)
        t_vault = settings["Theme"]["Vault"]

        if len(settings["Targets"].keys()) == 0:
            msg = ("There aren't any targets! To create a target use {}heist "
                   "createtarget .".format(ctx.prefix))
        else:
            target_names = [x for x in settings["Targets"]]
            crews = [int(subdict["Crew"]) for subdict in settings["Targets"].values()]
            success = [str(subdict["Success"]) + "%" for subdict in settings["Targets"].values()]
            vaults = [subdict["Vault"] for subdict in settings["Targets"].values()]
            data = list(zip(target_names, crews, vaults, success))
            table_data = sorted(data, key=itemgetter(1), reverse=True)
            table = tabulate(table_data, headers=["Target", "Max Crew", t_vault, "Success Rate"])
            msg = "```C\n{}```".format(table)

        await self.bot.say(msg)
svg.py (project: sketch-components, author: ibhubs)
def segments(self, precision=0):
        if max(self.rx, self.ry) < precision:
            return [[self.center]]

        p = [(0, self.P(0)), (1, self.P(1))]
        d = 2 * max(self.rx, self.ry)

        while d > precision:
            for (t1, p1), (t2, p2) in zip(p[:-1], p[1:]):
                t = t1 + (t2 - t1) / 2.
                d = Segment(p1, p2).pdistance(self.P(t))
                p.append((t, self.P(t)))
            p.sort(key=operator.itemgetter(0))

        ret = [x for t, x in p]
        return [ret]
geometry.py (project: sketch-components, author: ibhubs)
def simplify_segment(segment, epsilon):
    """Ramer-Douglas-Peucker algorithm"""
    if len(segment) < 3 or epsilon <= 0:
        return segment[:]

    l = Segment(segment[0], segment[-1])  # Longest segment

    # Find the furthest point from the segment
    index, maxDist = max([(i, l.pdistance(p)) for i, p in enumerate(segment)],
                         key=operator.itemgetter(1))

    if maxDist > epsilon:
        # Recursively call with segment splited in 2 on its furthest point
        r1 = simplify_segment(segment[:index + 1], epsilon)
        r2 = simplify_segment(segment[index:], epsilon)
        # Remove redundant 'middle' Point
        return r1[:-1] + r2
    else:
        return [segment[0], segment[-1]]
test_model_io.py (project: deb-python-cassandra-driver, author: openstack)
def test_model_read_as_dict(self):
        """
        Tests that columns of an instance can be read as a dict.
        """
        tm = TestModel.create(count=8, text='123456789', a_bool=True)
        column_dict = {
            'id': tm.id,
            'count': tm.count,
            'text': tm.text,
            'a_bool': tm.a_bool,
        }
        self.assertEqual(sorted(tm.keys()), sorted(column_dict.keys()))

        self.assertSetEqual(set(tm.values()), set(column_dict.values()))
        self.assertEqual(
            sorted(tm.items(), key=itemgetter(0)),
            sorted(column_dict.items(), key=itemgetter(0)))
        self.assertEqual(len(tm), len(column_dict))
        for column_id in column_dict.keys():
            self.assertEqual(tm[column_id], column_dict[column_id])

        tm['count'] = 6
        self.assertEqual(tm.count, 6)
AUC_Rank_Weighted_Average.py (project: ensemble_amazon, author: kaz-Anova)
def ranking(score):
    """ method to create a score into rank"""
    data=[]
    for i in range(len(score)):
        data.append([score[i],i])
    data=sorted(data, key=operator.itemgetter(0), reverse=False)
    value=data[0][0]
    data[0][0]=1
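    # assign ranks in ascending score order: a strictly larger score gets rank i+1, tied scores keep the previous rank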
    for i in range(1,len(score)):
        val=data[i][0]
        if val>value :
            value=val
            data[i][0]=(i+1)
        else :
            data[i][0]=data[i-1][0]
    data=sorted(data, key=operator.itemgetter(1), reverse=False)
    final_rank=[]
    for i in range(len(score)):
        final_rank.append(data[i][0])
    return final_rank

# retrieve a specific column from a 2-dimensional array as a 1-dimensional array
AUC_Weighted_Average.py (project: ensemble_amazon, author: kaz-Anova)
def ranking(score):
    """ method to create a score into rank"""
    data=[]
    for i in range(len(score)):
        data.append([score[i],i])
    data=sorted(data, key=operator.itemgetter(0), reverse=False)
    value=data[0][0]
    data[0][0]=1
    for i in range(1,len(score)):
        val=data[i][0]
        if val>value :
            value=val
            data[i][0]=(i+1)
        else :
            data[i][0]=data[i-1][0]
    data=sorted(data, key=operator.itemgetter(1), reverse=False)
    final_rank=[]
    for i in range(len(score)):
        final_rank.append(data[i][0])
    return final_rank

# retrieve a specific column from a 2-dimensional array as a 1-dimensional array
AUC_Average.py (project: ensemble_amazon, author: kaz-Anova)
def ranking(score):
    """ method to create a score into rank"""
    data=[]
    for i in range(len(score)):
        data.append([score[i],i])
    data=sorted(data, key=operator.itemgetter(0), reverse=False)
    value=data[0][0]
    data[0][0]=1
    for i in range(1,len(score)):
        val=data[i][0]
        if val>value :
            value=val
            data[i][0]=(i+1)
        else :
            data[i][0]=data[i-1][0]
    data=sorted(data, key=operator.itemgetter(1), reverse=False)
    final_rank=[]
    for i in range(len(score)):
        final_rank.append(data[i][0])
    return final_rank

# retrieve a specific column from a 2-dimensional array as a 1-dimensional array
AUC_Geo_Rank_Weighted_Average.py (project: ensemble_amazon, author: kaz-Anova)
def ranking(score):
    """ method to create a score into rank"""
    data=[]
    for i in range(len(score)):
        data.append([score[i],i])
    data=sorted(data, key=operator.itemgetter(0), reverse=False)
    value=data[0][0]
    data[0][0]=1
    for i in range(1,len(score)):
        val=data[i][0]
        if val>value :
            value=val
            data[i][0]=(i+1)
        else :
            data[i][0]=data[i-1][0]
    data=sorted(data, key=operator.itemgetter(1), reverse=False)
    final_rank=[]
    for i in range(len(score)):
        final_rank.append(data[i][0])
    return final_rank

# retrieve a specific column from a 2-dimensional array as a 1-dimensional array
vocab.py (project: scientific-paper-summarisation, author: EdCo95)
def prune(self, min_freq=5, max_size=sys.maxsize):
        """returns new Vocab object, pruned based on minimum symbol frequency"""
        pruned_vocab = Vocab(unk=self.unk, emb=self.emb)
        cnt = 0
        for sym, freq in sorted(self.sym2freqs.items(), key=operator.itemgetter(1), reverse=True):
            # for sym in self.sym2freqs:
            # freq = self.sym2freqs[sym]
            cnt += 1
            if freq >= min_freq and cnt < max_size:
                pruned_vocab(sym)
                pruned_vocab.sym2freqs[sym] = freq
        if self.frozen:
            # if original Vocab was frozen, freeze new one
            pruned_vocab.freeze()

        return pruned_vocab
_collections.py (project: Flask_Blog, author: sugarguo)
def _manage_size(self):
        if not self._mutex.acquire(False):
            return
        try:
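            # while over the allowed size, sort entries by their counter field (index 2), largest first, and evict everything past the first self.capacity entries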
            while len(self) > self.capacity + self.capacity * self.threshold:
                by_counter = sorted(dict.values(self),
                                    key=operator.itemgetter(2),
                                    reverse=True)
                for item in by_counter[self.capacity:]:
                    try:
                        del self[item[0]]
                    except KeyError:
                        # deleted elsewhere; skip
                        continue
        finally:
            self._mutex.release()
ServerStats.py (project: pineapple, author: peter765)
def rolestat(self, message_object):
        server = message_object.server
        msg = "Role stats for this server (" + str(server.member_count) + " users in total):\n"

        roles = dict()

        for member in server.members:
            for member_role in member.roles:
                if member_role.name != "@everyone":
                    if member_role.name in roles:
                        roles[member_role.name] += 1
                    else:
                        roles[member_role.name] = 1
        sorted_x = sorted(roles.items(), key=operator.itemgetter(1))
        for role, count in reversed(sorted_x):
            msg += role + ": " + str(count) + " users\n"

        await self.pm.clientWrap.send_message(self.name, message_object.channel, msg)
ml-original-data.py (project: kaggle-spark-ml, author: imgoodman)
def predict_dt():
    data_with_idx=data_dt.zipWithIndex().map(lambda (k, v): (v, k))
    test=data_with_idx.sample(False, 0.2, 42)
    train=data_with_idx.subtractByKey(test)
    test_data=test.map(lambda (idx, p): p)
    train_data=train.map(lambda (idx, p): p)
    maxDepths=[1,2,3,4,5,10,20]
    maxBins=[2,4,8,16,32,64,100]
    m={}
    for maxDepth in maxDepths:
        for maxBin in maxBins:
            metrics=evaluate_dt(train_data, test_data, maxDepth, maxBin)
            print( "metrics in maxDepth: %d; maxBins: %d" % (maxDepth, maxBin))
            print( metrics)
            m["maxDepth:%d;maxBins:%d" % (maxDepth, maxBin)]=metrics[2]
    mSort=sorted(m.iteritems(), key=operator.itemgetter(1), reverse=True)
    print( mSort)
BotAdmin.py (project: CorpBot.py, author: corpnewt)
def ignored(self, ctx):
        """Lists the users currently being ignored."""
        ignoreArray = self.settings.getServerStat(ctx.message.server, "IgnoredUsers")

        # rows_by_lfname = sorted(rows, key=itemgetter('lname','fname'))

        promoSorted = sorted(ignoreArray, key=itemgetter('Name'))

        if not len(promoSorted):
            msg = 'I\'m not currently ignoring anyone.'
            await self.bot.send_message(ctx.message.channel, msg)
            return

        roleText = "Currently Ignored Users:\n"

        for arole in promoSorted:
            for role in ctx.message.server.members:
                if role.id == arole["ID"]:
                    # Found the role ID
                    roleText = '{}*{}*\n'.format(roleText, DisplayName.name(role))

        await self.bot.send_message(ctx.message.channel, roleText)
knn.py (project: machine-learning, author: zzw0929)
def classify0(inX, dataSet, labels, k):

    # compute the Euclidean distance from inX to every row of dataSet
    dataSetSize = dataSet.shape[0]
    diffMat =  tile(inX, (dataSetSize,1)) - dataSet
    sqDiffMat = diffMat**2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances**0.5
    sortedDistIndicies = distances.argsort()
    # tally the labels of the k nearest neighbours
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
    # sort the label counts in descending order and return the most common label
    sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
TraceAnalysis.py (project: VMAttack, author: anatolikalysch)
def address_count(trace):
    """
    Count the occurrences of each address in the trace and return a list sorted from highest to lowest count
    :param trace: execution trace of the binary
    :return: sorted list starting with the highest address count and ending with the lowest
    """
    trace = [line.addr for line in trace]
    analysis_result = {}
    for addr in trace:
        # for heuristic analysis the count of address
        count = trace.count(addr)
        if addr not in analysis_result.keys():
            analysis_result[addr] = count
    # sort the analysis result by most common addresses
    sorted_result = sorted(analysis_result.items(), key=operator.itemgetter(1))
    sorted_result.reverse()
    return sorted_result
test_limesurvey.py (project: limesurveyrc2api, author: lindsay-stevens)
def setUpClass(cls):

        # Read config.ini file
        current_dir = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(current_dir, 'config.ini')
        confparser = ConfigParser()
        with open(config_path, "r") as config_file:
            confparser.read_file(config_file)
        cls.url = confparser['test']['url']
        cls.username = confparser['test']['username']
        cls.password = confparser['test']['password']
        cls.api = LimeSurvey(
            url=cls.url,
            username=cls.username)
        cls.session_key = None
        cls.api.open(password=cls.password)

        surveys = sorted(cls.api.survey.list_surveys(), key=itemgetter("sid"))
        cls.survey_id = surveys[0].get("sid")
        cls.survey_id_invalid = -1
project_stats.py (project: FRG-Crowdsourcing, author: 97amarnathk)
def get_stats(project_id, geo=False, period='2 week'):
    """Return the stats of a given project."""
    hours, hours_anon, hours_auth, max_hours, \
        max_hours_anon, max_hours_auth = stats_hours(project_id, period)
    users, anon_users, auth_users = stats_users(project_id, period)
    dates, dates_anon, dates_auth = stats_dates(project_id, period)


    n_tasks(project_id)
    sum(dates.values())

    sorted(dates.iteritems(), key=operator.itemgetter(0))

    dates_stats = stats_format_dates(project_id, dates,
                                     dates_anon, dates_auth)

    hours_stats = stats_format_hours(project_id, hours, hours_anon, hours_auth,
                                     max_hours, max_hours_anon, max_hours_auth)

    users_stats = stats_format_users(project_id, users, anon_users, auth_users,
                                     geo)

    return dates_stats, hours_stats, users_stats
me_at_the_zoo.py (project: google-hashcode-2017, author: unibg-seclab)
def knapsack(items, capacity):
    table = [[0 for w in range(capacity + 1)] for j in xrange(len(items) + 1)]
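    # classic 0/1 knapsack: table[j][w] is the best value achievable with the first j items and capacity w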

    for j in xrange(1, len(items) + 1):
        item, wt, val = items[j-1]
        for w in xrange(1, capacity + 1):
            if wt > w:
                table[j][w] = table[j-1][w]
            else:
                table[j][w] = max(table[j-1][w],
                                  table[j-1][w-wt] + val)

    result = []
    w = capacity
    for j in range(len(items), 0, -1):
        was_added = table[j][w] != table[j-1][w]

        if was_added:
            item, wt, val = items[j-1]
            result.append(items[j-1])
            w -= wt

    return result, sum(map(itemgetter(2), result))
trending_today.py (project: google-hashcode-2017, author: unibg-seclab)
def knapsack(items, capacity):
    table = [[0 for w in range(capacity + 1)] for j in xrange(len(items) + 1)]

    for j in xrange(1, len(items) + 1):
        item, wt, val = items[j-1]
        for w in xrange(1, capacity + 1):
            if wt > w:
                table[j][w] = table[j-1][w]
            else:
                table[j][w] = max(table[j-1][w],
                                  table[j-1][w-wt] + val)

    result = []
    w = capacity
    for j in range(len(items), 0, -1):
        was_added = table[j][w] != table[j-1][w]

        if was_added:
            item, wt, val = items[j-1]
            result.append(items[j-1])
            w -= wt

    return result, sum(map(itemgetter(2), result))
wrong.py (project: linkedin_recommend, author: duggalr2)
def barGraph(data_count):

    names, count_in = [], []
    data_count = sorted(data_count.items(), key=operator.itemgetter(1), reverse=True)
    for i in data_count:
        names.append(i[0])
        count_in.append(i[-1])

    plt.rcdefaults()
    fig, ax = plt.subplots()
    y_pos = np.arange(len(names))
    ax.barh(y_pos, count_in, align='center',
            color='green', ecolor='black')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(names)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel('Categories')
    ax.set_title('# of job titles in each category')
    plt.show()

