python类OrderedDict()的实例源码

html5parser.py 文件源码 项目:SHAREOpenRefineWkshop 作者: cmh2166 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def normalizeToken(self, token):
    """Apply HTML5-specific normalization to a single token.

    For StartTag tokens the raw attribute pair list is turned into an
    OrderedDict. When the same attribute name appears more than once,
    the first occurrence must win, so the dict is re-populated from the
    reversed pair list in that case. All other tokens pass through
    untouched.
    """
    if token["type"] != tokenTypes["StartTag"]:
        return token

    attrs = token["data"]
    token["data"] = OrderedDict(attrs)
    if len(token["data"]) < len(attrs):
        # Duplicates collapsed: replay pairs in reverse so the
        # earliest value for each name is the one that survives.
        token["data"].update(attrs[::-1])
    return token
html5parser.py 文件源码 项目:SHAREOpenRefineWkshop 作者: cmh2166 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def adjust_attributes(token, replacements):
    """Rename attributes of *token* according to the *replacements* map.

    Only rebuilds token['data'] when at least one attribute name is
    present in *replacements*; names not in the map are kept as-is.
    """
    if PY3 or _utils.PY27:
        # Dict views support set intersection directly on these versions.
        shared = viewkeys(token['data']) & viewkeys(replacements)
    else:
        shared = frozenset(token['data']) & frozenset(replacements)
    if shared:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in token['data'].items())
html5parser.py 文件源码 项目:Liljimbo-Chatbot 作者: chrisjim316 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def normalizeToken(self, token):
    """Apply HTML5-specific normalization to a single token.

    For StartTag tokens the raw attribute pair list is turned into an
    OrderedDict. When the same attribute name appears more than once,
    the first occurrence must win, so the dict is re-populated from the
    reversed pair list in that case. All other tokens pass through
    untouched.
    """
    if token["type"] != tokenTypes["StartTag"]:
        return token

    attrs = token["data"]
    token["data"] = OrderedDict(attrs)
    if len(token["data"]) < len(attrs):
        # Duplicates collapsed: replay pairs in reverse so the
        # earliest value for each name is the one that survives.
        token["data"].update(attrs[::-1])
    return token
html5parser.py 文件源码 项目:Liljimbo-Chatbot 作者: chrisjim316 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def adjust_attributes(token, replacements):
    """Rename attributes of *token* according to the *replacements* map.

    Only rebuilds token['data'] when at least one attribute name is
    present in *replacements*; names not in the map are kept as-is.
    """
    if PY3 or _utils.PY27:
        # Dict views support set intersection directly on these versions.
        shared = viewkeys(token['data']) & viewkeys(replacements)
    else:
        shared = frozenset(token['data']) & frozenset(replacements)
    if shared:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in token['data'].items())
html5parser.py 文件源码 项目:leetcode 作者: thomasyimgit 项目源码 文件源码 阅读 26 收藏 0 点赞 0 评论 0
def normalizeToken(self, token):
    """Apply HTML5-specific normalization to a single token.

    For StartTag tokens the raw attribute pair list is turned into an
    OrderedDict. When the same attribute name appears more than once,
    the first occurrence must win, so the dict is re-populated from the
    reversed pair list in that case. All other tokens pass through
    untouched.
    """
    if token["type"] != tokenTypes["StartTag"]:
        return token

    attrs = token["data"]
    token["data"] = OrderedDict(attrs)
    if len(token["data"]) < len(attrs):
        # Duplicates collapsed: replay pairs in reverse so the
        # earliest value for each name is the one that survives.
        token["data"].update(attrs[::-1])
    return token
html5parser.py 文件源码 项目:leetcode 作者: thomasyimgit 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def adjust_attributes(token, replacements):
    """Rename attributes of *token* according to the *replacements* map.

    Only rebuilds token['data'] when at least one attribute name is
    present in *replacements*; names not in the map are kept as-is.
    """
    if PY3 or _utils.PY27:
        # Dict views support set intersection directly on these versions.
        shared = viewkeys(token['data']) & viewkeys(replacements)
    else:
        shared = frozenset(token['data']) & frozenset(replacements)
    if shared:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in token['data'].items())
html5parser.py 文件源码 项目:flask_system 作者: prashasy 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def normalizeToken(self, token):
    """Apply HTML5-specific normalization to a single token.

    For StartTag tokens the raw attribute pair list is turned into an
    OrderedDict. When the same attribute name appears more than once,
    the first occurrence must win, so the dict is re-populated from the
    reversed pair list in that case. All other tokens pass through
    untouched.
    """
    if token["type"] != tokenTypes["StartTag"]:
        return token

    attrs = token["data"]
    token["data"] = OrderedDict(attrs)
    if len(token["data"]) < len(attrs):
        # Duplicates collapsed: replay pairs in reverse so the
        # earliest value for each name is the one that survives.
        token["data"].update(attrs[::-1])
    return token
html5parser.py 文件源码 项目:flask_system 作者: prashasy 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def adjust_attributes(token, replacements):
    """Rename attributes of *token* according to the *replacements* map.

    Only rebuilds token['data'] when at least one attribute name is
    present in *replacements*; names not in the map are kept as-is.
    """
    if PY3 or _utils.PY27:
        # Dict views support set intersection directly on these versions.
        shared = viewkeys(token['data']) & viewkeys(replacements)
    else:
        shared = frozenset(token['data']) & frozenset(replacements)
    if shared:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in token['data'].items())
html5parser.py 文件源码 项目:news-for-good 作者: thecodinghub 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def normalizeToken(self, token):
    """Apply HTML5-specific normalization to a single token.

    For StartTag tokens the raw attribute pair list is turned into an
    OrderedDict. When the same attribute name appears more than once,
    the first occurrence must win, so the dict is re-populated from the
    reversed pair list in that case. All other tokens pass through
    untouched.
    """
    if token["type"] != tokenTypes["StartTag"]:
        return token

    attrs = token["data"]
    token["data"] = OrderedDict(attrs)
    if len(token["data"]) < len(attrs):
        # Duplicates collapsed: replay pairs in reverse so the
        # earliest value for each name is the one that survives.
        token["data"].update(attrs[::-1])
    return token
html5parser.py 文件源码 项目:news-for-good 作者: thecodinghub 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def adjust_attributes(token, replacements):
    """Rename attributes of *token* according to the *replacements* map.

    Only rebuilds token['data'] when at least one attribute name is
    present in *replacements*; names not in the map are kept as-is.
    """
    if PY3 or _utils.PY27:
        # Dict views support set intersection directly on these versions.
        shared = viewkeys(token['data']) & viewkeys(replacements)
    else:
        shared = frozenset(token['data']) & frozenset(replacements)
    if shared:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in token['data'].items())
html5parser.py 文件源码 项目:CaScale 作者: Thatsillogical 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def normalizeToken(self, token):
    """Apply HTML5-specific normalization to a single token.

    For StartTag tokens the raw attribute pair list is turned into an
    OrderedDict. When the same attribute name appears more than once,
    the first occurrence must win, so the dict is re-populated from the
    reversed pair list in that case. All other tokens pass through
    untouched.
    """
    if token["type"] != tokenTypes["StartTag"]:
        return token

    attrs = token["data"]
    token["data"] = OrderedDict(attrs)
    if len(token["data"]) < len(attrs):
        # Duplicates collapsed: replay pairs in reverse so the
        # earliest value for each name is the one that survives.
        token["data"].update(attrs[::-1])
    return token
html5parser.py 文件源码 项目:CaScale 作者: Thatsillogical 项目源码 文件源码 阅读 26 收藏 0 点赞 0 评论 0
def adjust_attributes(token, replacements):
    """Rename attributes of *token* according to the *replacements* map.

    Only rebuilds token['data'] when at least one attribute name is
    present in *replacements*; names not in the map are kept as-is.
    """
    if PY3 or _utils.PY27:
        # Dict views support set intersection directly on these versions.
        shared = viewkeys(token['data']) & viewkeys(replacements)
    else:
        shared = frozenset(token['data']) & frozenset(replacements)
    if shared:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in token['data'].items())
html5parser.py 文件源码 项目:ShelbySearch 作者: Agentscreech 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def normalizeToken(self, token):
    """Apply HTML5-specific normalization to a single token.

    For StartTag tokens the raw attribute pair list is turned into an
    OrderedDict. When the same attribute name appears more than once,
    the first occurrence must win, so the dict is re-populated from the
    reversed pair list in that case. All other tokens pass through
    untouched.
    """
    if token["type"] != tokenTypes["StartTag"]:
        return token

    attrs = token["data"]
    token["data"] = OrderedDict(attrs)
    if len(token["data"]) < len(attrs):
        # Duplicates collapsed: replay pairs in reverse so the
        # earliest value for each name is the one that survives.
        token["data"].update(attrs[::-1])
    return token
html5parser.py 文件源码 项目:ShelbySearch 作者: Agentscreech 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def adjust_attributes(token, replacements):
    """Rename attributes of *token* according to the *replacements* map.

    Only rebuilds token['data'] when at least one attribute name is
    present in *replacements*; names not in the map are kept as-is.
    """
    if PY3 or _utils.PY27:
        # Dict views support set intersection directly on these versions.
        shared = viewkeys(token['data']) & viewkeys(replacements)
    else:
        shared = frozenset(token['data']) & frozenset(replacements)
    if shared:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in token['data'].items())
html5parser.py 文件源码 项目:where2live 作者: fbessez 项目源码 文件源码 阅读 25 收藏 0 点赞 0 评论 0
def normalizeToken(self, token):
    """Apply HTML5-specific normalization to a single token.

    For StartTag tokens the raw attribute pair list is turned into an
    OrderedDict. When the same attribute name appears more than once,
    the first occurrence must win, so the dict is re-populated from the
    reversed pair list in that case. All other tokens pass through
    untouched.
    """
    if token["type"] != tokenTypes["StartTag"]:
        return token

    attrs = token["data"]
    token["data"] = OrderedDict(attrs)
    if len(token["data"]) < len(attrs):
        # Duplicates collapsed: replay pairs in reverse so the
        # earliest value for each name is the one that survives.
        token["data"].update(attrs[::-1])
    return token
html5parser.py 文件源码 项目:where2live 作者: fbessez 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def adjust_attributes(token, replacements):
    """Rename attributes of *token* according to the *replacements* map.

    Only rebuilds token['data'] when at least one attribute name is
    present in *replacements*; names not in the map are kept as-is.
    """
    if PY3 or _utils.PY27:
        # Dict views support set intersection directly on these versions.
        shared = viewkeys(token['data']) & viewkeys(replacements)
    else:
        shared = frozenset(token['data']) & frozenset(replacements)
    if shared:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in token['data'].items())
html5parser.py 文件源码 项目:respeaker_virtualenv 作者: respeaker 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def normalizeToken(self, token):
    """Apply HTML5-specific normalization to a single token.

    For StartTag tokens the raw attribute pair list is turned into an
    OrderedDict. When the same attribute name appears more than once,
    the first occurrence must win, so the dict is re-populated from the
    reversed pair list in that case. All other tokens pass through
    untouched.
    """
    if token["type"] != tokenTypes["StartTag"]:
        return token

    attrs = token["data"]
    token["data"] = OrderedDict(attrs)
    if len(token["data"]) < len(attrs):
        # Duplicates collapsed: replay pairs in reverse so the
        # earliest value for each name is the one that survives.
        token["data"].update(attrs[::-1])
    return token
html5parser.py 文件源码 项目:respeaker_virtualenv 作者: respeaker 项目源码 文件源码 阅读 26 收藏 0 点赞 0 评论 0
def adjust_attributes(token, replacements):
    """Rename attributes of *token* according to the *replacements* map.

    Only rebuilds token['data'] when at least one attribute name is
    present in *replacements*; names not in the map are kept as-is.
    """
    if PY3 or _utils.PY27:
        # Dict views support set intersection directly on these versions.
        shared = viewkeys(token['data']) & viewkeys(replacements)
    else:
        shared = frozenset(token['data']) & frozenset(replacements)
    if shared:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in token['data'].items())
html5parser.py 文件源码 项目:Tencent_Cartoon_Download 作者: Fretice 项目源码 文件源码 阅读 26 收藏 0 点赞 0 评论 0
def normalizeToken(self, token):
    """Apply HTML5-specific normalization to a single token.

    For StartTag tokens the raw attribute pair list is turned into an
    OrderedDict. When the same attribute name appears more than once,
    the first occurrence must win, so the dict is re-populated from the
    reversed pair list in that case. All other tokens pass through
    untouched.
    """
    if token["type"] != tokenTypes["StartTag"]:
        return token

    attrs = token["data"]
    token["data"] = OrderedDict(attrs)
    if len(token["data"]) < len(attrs):
        # Duplicates collapsed: replay pairs in reverse so the
        # earliest value for each name is the one that survives.
        token["data"].update(attrs[::-1])
    return token
html5parser.py 文件源码 项目:Tencent_Cartoon_Download 作者: Fretice 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def adjust_attributes(token, replacements):
    """Rename attributes of *token* according to the *replacements* map.

    Only rebuilds token['data'] when at least one attribute name is
    present in *replacements*; names not in the map are kept as-is.
    """
    if PY3 or _utils.PY27:
        # Dict views support set intersection directly on these versions.
        shared = viewkeys(token['data']) & viewkeys(replacements)
    else:
        shared = frozenset(token['data']) & frozenset(replacements)
    if shared:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in token['data'].items())


问题


面经


文章

微信
公众号

扫码关注公众号