Example source code for the Python Mapping() class

models.py (project: CodeGra.de, author: CodeGra-de)
def __extended_to_json__(self) -> t.Mapping[str, t.Any]:
        """Create a extended JSON serializable representation of this object.

        This object will look like this:

        .. code:: python

            {
                'hidden': bool, # Indicates whether this user can see
                                # hidden assignments.
                **self.__to_json__()
            }

        :returns: An object as described above.
        """
        return {
            "hidden": self.can_see_hidden,
            **self.__to_json__(),
        }
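The '**self.__to_json__()' line above is plain dict unpacking: the base representation is merged into the extended one, with later keys winning on conflict. A minimal, self-contained sketch of that pattern (hypothetical stand-in functions, not CodeGra.de's models):

def base_to_json():
    # stand-in for self.__to_json__() in the snippet above
    return {'id': 1, 'name': 'example'}

def extended_to_json():
    # 'hidden' is added first, then the base dict is unpacked into the result
    return {
        'hidden': True,
        **base_to_json(),
    }

print(extended_to_json())  # {'hidden': True, 'id': 1, 'name': 'example'}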
models.py (project: CodeGra.de, author: CodeGra-de)
def __to_json__(self) -> t.Mapping[str, t.Any]:
        """Creates a JSON serializable representation of this object.

        This object will look like this:

        .. code:: python

            {
                'name': str, # The name of the course,
                'id': int, # The id of this course.
                'created_at': str, # ISO UTC date.
                'is_lti': bool, # Is this course an LTI course.
            }

        :returns: An object as described above.
        """
        return {
            'id': self.id,
            'name': self.name,
            'created_at': self.created_at.isoformat(),
            'is_lti': self.lti_course_id is not None,
        }
models.py (project: CodeGra.de, author: CodeGra-de)
def __to_json__(self) -> t.Mapping[str, t.Any]:
        """Converts a rubric of a work to a object that is JSON serializable.

        The resulting object will look like this:

        .. code:: python

            {
                'changed_at': str, # The date the history was added.
                'is_rubric': bool, # Was this history item added by a rubric
                                   # grade.
                'grade': float, # The new grade, -1 if the grade was deleted.
                'passed_back': bool, # Is this grade given back to LTI.
                'user': User, # The user that added this grade.
            }

        :returns: An object as described above.
        """
        return {
            'changed_at': self.changed_at.isoformat(),
            'is_rubric': self.is_rubric,
            'grade': self.grade,
            'passed_back': self.passed_back,
            'user': self.user,
        }
models.py (project: CodeGra.de, author: CodeGra-de)
def __to_json__(self) -> t.Mapping[str, t.Union[str, bool, int]]:
        """Creates a JSON serializable representation of this object.


        This object will look like this:

        .. code:: python

            {
                'name': str, # The name of the file or directory.
                'id': int, # The id of this file.
                'is_directory': bool, # Is this file a directory.
            }

        :returns: An object as described above.
        """
        return {
            'name': self.name,
            'is_directory': self.is_directory,
            'id': self.id,
        }
models.py (project: CodeGra.de, author: CodeGra-de)
def __to_json__(self) -> t.Mapping[str, t.Any]:
        """Returns the JSON serializable representation of this class.

        This representation also returns a count of the :class:`LinterState` of
        the attached :class:`LinterInstance` objects.

        :returns: A dict containing JSON serializable representations of the
                  attributes and the test state counts of this
                  AssignmentLinter.
        """
        return {
            'done': self.linters_done,
            'working': self.linters_running,
            'crashed': self.linters_crashed,
            'id': self.id,
            'name': self.name,
        }
lti.py (project: CodeGra.de, author: CodeGra-de)
def second_phase_lti_launch(
) -> helpers.JSONResponse[t.Mapping[str, t.Union[str, models.Assignment, bool]]
                          ]:
    launch_params = jwt.decode(
        flask.request.headers.get('Jwt', None),
        app.config['LTI_SECRET_KEY'],
        algorithms=['HS512']  # PyJWT's decode takes 'algorithms' (a list), not 'algorithm'
    )['params']
    lti = CanvasLTI(launch_params)

    user, new_token = lti.ensure_lti_user()
    course = lti.get_course()
    assig = lti.get_assignment(user)
    lti.set_user_role(user)
    new_role_created = lti.set_user_course_role(user, course)
    db.session.commit()

    result: t.MutableMapping[str, t.Union[str, models.Assignment, bool]]
    result = {'assignment': assig, 'new_role_created': new_role_created}
    if new_token is not None:
        result['access_token'] = new_token

    return helpers.jsonify(result)
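For reference, the 'Jwt' header consumed above is a token signed with HS512; a minimal PyJWT round trip shows the encode/decode pairing (hypothetical secret and payload, not CodeGra.de's real token contents):

import jwt  # PyJWT

secret = 'not-the-real-LTI_SECRET_KEY'  # hypothetical secret
token = jwt.encode({'params': {'roles': 'Learner'}}, secret, algorithm='HS512')

decoded = jwt.decode(token, secret, algorithms=['HS512'])
print(decoded['params'])  # {'roles': 'Learner'}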
about.py (project: CodeGra.de, author: CodeGra-de)
def about(
) -> JSONResponse[t.Mapping[str, t.Union[str, t.Mapping[str, bool]]]]:
    """Get the version and features of the currently running instance.

    .. :quickref: About; Get the version and features.

    :>json string version: The version of the running instance.
    :>json object features: A mapping from feature name to a boolean
        indicating whether the current instance has that feature enabled.

    :returns: The mapping as described above.
    """
    features = {
        key: bool(value)
        for key, value in psef.app.config['FEATURES'].items()
    }
    return jsonify(
        {
            'version': psef.app.config['_VERSION'],
            'features': features,
        },
    )
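The JSON returned by this endpoint therefore has the shape below; the version string and feature names are illustrative assumptions, not the actual configuration:

{
    'version': 'v1.0.0',        # psef.app.config['_VERSION']
    'features': {               # one boolean per configured feature flag
        'LTI': True,
        'RUBRICS': True,
        'BLACKBOARD_ZIP_UPLOAD': False,
    },
}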
submissions.py (project: CodeGra.de, author: CodeGra-de)
def get_rubric(submission_id: int) -> JSONResponse[t.Mapping[str, t.Any]]:
    """Return full rubric of the :class:`.models.Assignment` of the given
    submission (:class:`.models.Work`).

    .. :quickref: Submission; Get a rubric and its selected items.

    :param int submission_id: The id of the submission
    :returns: A response containing the JSON serialized rubric as described in
        :py:meth:`.Work.__rubric_to_json__`.

    :raises APIException: If the submission with the given id does not exist.
                          (OBJECT_ID_NOT_FOUND)
    :raises PermissionException: If there is no logged in user. (NOT_LOGGED_IN)
    :raises PermissionException: If the user cannot see the assignment of the
                                 given submission. (INCORRECT_PERMISSION)
    """
    work = helpers.get_or_404(models.Work, submission_id)
    auth.ensure_permission('can_see_assignments', work.assignment.course_id)
    return jsonify(work.__rubric_to_json__())
utils.py (project: mimesis, author: lk-geimfari)
def update_dict(initial: JSON, other: Mapping) -> JSON:
    """Recursively update a dictionary.

    :param initial: Dict to update.
    :type initial: dict or list
    :param other: Dict to update from.
    :type other: Mapping
    :return: Updated dict.
    :rtype: dict
    """
    for key, value in other.items():
        if isinstance(value, collections.abc.Mapping):  # collections.Mapping was removed in Python 3.10
            r = update_dict(initial.get(key, {}), value)
            initial[key] = r
        else:
            initial[key] = other[key]
    return initial
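The recursion only descends when the incoming value is itself a mapping; otherwise the value is overwritten. A self-contained sketch of the same recursive merge, written against collections.abc.Mapping so it also runs on Python 3.10+ (plain dicts assumed as input):

from collections.abc import Mapping

def deep_update(initial: dict, other: Mapping) -> dict:
    # Recursively merge 'other' into 'initial', preferring values from 'other'.
    for key, value in other.items():
        if isinstance(value, Mapping):
            initial[key] = deep_update(initial.get(key, {}), value)
        else:
            initial[key] = value
    return initial

cfg = {'db': {'host': 'localhost', 'port': 5432}}
print(deep_update(cfg, {'db': {'port': 5433}, 'debug': True}))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}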
utils.py (project: sockeye, author: awslabs)
def load_params(fname: str) -> Tuple[Dict[str, mx.nd.NDArray], Dict[str, mx.nd.NDArray]]:
    """
    Loads parameters from a file.

    :param fname: The file containing the parameters.
    :return: Mapping from parameter names to the actual parameters for both the arg parameters and the aux parameters.
    """
    save_dict = mx.nd.load(fname)
    arg_params = {}
    aux_params = {}
    for k, v in save_dict.items():
        tp, name = k.split(':', 1)
        if tp == 'arg':
            arg_params[name] = v
        if tp == 'aux':
            aux_params[name] = v
    return arg_params, aux_params
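The split relies on the 'arg:' / 'aux:' prefixes MXNet writes when saving parameters; the same sorting logic can be exercised without MXNet using a toy dict (parameter names below are made up):

save_dict = {
    'arg:fc1_weight': 'weight-array',     # placeholders for mx.nd.NDArray values
    'arg:fc1_bias': 'bias-array',
    'aux:bn_moving_mean': 'mean-array',
}

arg_params, aux_params = {}, {}
for k, v in save_dict.items():
    tp, name = k.split(':', 1)
    if tp == 'arg':
        arg_params[name] = v
    elif tp == 'aux':
        aux_params[name] = v

print(sorted(arg_params))  # ['fc1_bias', 'fc1_weight']
print(sorted(aux_params))  # ['bn_moving_mean']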
type_util.py (project: pytypes, author: Stewori)
def get_Generic_parameters(tp, generic_supertype):
    """tp must be a subclass of generic_supertype.
    Retrieves the type values from tp that correspond to parameters
    defined by generic_supertype.

    E.g. get_Generic_parameters(tp, typing.Mapping) is equivalent
    to get_Mapping_key_value(tp) except for the error message.

    Note that get_Generic_itemtype(tp) is not exactly equal to
    get_Generic_parameters(tp, typing.Container), as that method
    additionally contains treatment for typing.Tuple and typing.Iterable.
    """
    try:
        res = _select_Generic_superclass_parameters(tp, generic_supertype)
    except TypeError:
        res = None
    if res is None:
        raise TypeError("%s has no proper parameters defined by %s."%
                (type_str(tp), type_str(generic_supertype)))
    else:
        return tuple(res)
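Usage mirrors the docstring's Mapping example; the import below assumes the helper is re-exported at the pytypes package level, which may differ by version:

import typing
from pytypes import get_Generic_parameters  # assumption: adjust the import for your pytypes version

# For a parameterized Mapping, the key and value types come back as a tuple.
print(get_Generic_parameters(typing.Mapping[str, int], typing.Mapping))
# expected: (<class 'str'>, <class 'int'>)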
seqdata.py (project: sk-torch, author: mattHawthorn)
def from_vocab(cls, sequences: Map[int, Seq[H]], vocab: Vocabulary, max_len: int, pack_sequences: bool=False,
                   append_eos: bool=True, eos_token: Opt[H]=DEFAULT_EOS, null_token: H=DEFAULT_NULL,
                   int_id_type: str='long', shuffle: bool=True):
        """
        :param vocab: instance of Vocabulary to use for encoding/decoding tokens
        :param max_len: maximum length of sequences to sample
        :param pack_sequences: bool indicating whether to return regular Tensors or PackedSequence instances.
        :param int_id_type: string indicating the type of int ids to use. Must be a key of
            data.str_to_int_tensor_type. Numpy aliases for integer types are valid, as well as 'long', 'short',
            'byte', 'char'. The default 'long' is recommended, as only LongTensors can be used to index
            Embeddings in pytorch.
        :param eos_token: string or hashable to append to mark end-of-sequence in encoding
        :param null_token: optional hashable to use for padding sequences. Added to the vocab, unless none is passed
            and none is built, in which case this is considered to be an int id.
        """
        encoder = SequenceTensorEncoder(vocab, append_eos=append_eos, eos_token=eos_token, null_token=null_token,
                                        int_id_type=int_id_type)
        return cls(sequences=sequences, encoder=encoder, max_len=max_len, pack_sequences=pack_sequences,
                   null_token=null_token, shuffle=shuffle)
seqdata.py (project: sk-torch, author: mattHawthorn)
def from_token2id(cls, sequences: Map[int, Seq[H]], token2id: Dict[H, int],
                      max_len: int, pack_sequences: bool=False,
                      append_eos: bool=True, eos_token: Opt[H]=DEFAULT_EOS,
                      null_token: H=DEFAULT_NULL, oov_token: H=DEFAULT_OOV,
                      int_id_type: str='long', shuffle: bool=True):
        """
        :param token2id: mapping of tokens to int ids
        :param max_len: maximum length of sequences to sample
        :param pack_sequences: bool indicating whether to return regular Tensors or PackedSequence instances.
        :param int_id_type: string indicating the type of int ids to use. Must be a key of
            data.str_to_int_tensor_type. Numpy aliases for integer types are valid, as well as 'long', 'short',
            'byte', 'char'. The default 'long' is recommended, as only LongTensors can be used to index
            Embeddings in pytorch.
        :param oov_token: hashable to insert for out-of-vocab tokens when encoding
        :param eos_token: string or hashable to append to mark end-of-sequence in encoding
        :param null_token: optional hashable to use for padding sequences. Added to the vocab, unless none is passed
            and none is built, in which case this is considered to be an int id.
        """
        vocab = Vocabulary.from_token2id(token2id, oov_token=oov_token)
        encoder = SequenceTensorEncoder(vocab, append_eos=append_eos, eos_token=eos_token, null_token=null_token,
                                        int_id_type=int_id_type)
        return cls(sequences=sequences, encoder=encoder, max_len=max_len, pack_sequences=pack_sequences,
                   null_token=null_token, shuffle=shuffle)
seqdata.py (project: sk-torch, author: mattHawthorn)
def from_id2token(cls, sequences: Map[int, Seq[H]], id2token: Dict[int, H],
                      max_len: int, pack_sequences: bool=False,
                      append_eos: bool=True, eos_token: Opt[H]=DEFAULT_EOS,
                      null_token: H=DEFAULT_NULL, oov_token: H=DEFAULT_OOV,
                      int_id_type: str='long', shuffle: bool=True):
        """
        :param id2token: mapping of int ids to tokens
        :param max_len: maximum length of sequences to sample
        :param pack_sequences: bool indicating whether to return regular Tensors or PackedSequence instances.
        :param int_id_type: string indicating the type of int ids to use. Must be a key of
            data.str_to_int_tensor_type. Numpy aliases for integer types are valid, as well as 'long', 'short',
            'byte', 'char'. The default 'long' is recommended, as only LongTensors can be used to index
            Embeddings in pytorch.
        :param oov_token: hashable to insert for out-of-vocab tokens when encoding
        :param eos_token: hashable to append to mark end-of-sequence in encoding
        :param null_token: hashable to use for padding sequences. Added to the vocab, unless none is passed
            and none is built, in which case this is considered to be an int id.
        """
        vocab = Vocabulary.from_id2token(id2token, oov_token=oov_token)
        encoder = SequenceTensorEncoder(vocab, append_eos=append_eos, eos_token=eos_token, null_token=null_token,
                                        int_id_type=int_id_type)
        return cls(sequences=sequences, encoder=encoder, max_len=max_len, pack_sequences=pack_sequences,
                   null_token=null_token, shuffle=shuffle)
validate.py (project: nirum-python, author: spoqa)
def validate_type(data, type_):
    instance_check = False
    abstract_types = {typing.AbstractSet, typing.Sequence, typing.Mapping}
    if hasattr(type_, '__origin__') and type_.__origin__ in abstract_types:
        param_type = get_abstract_param_types(type_)
        imp_types = {
            typing.AbstractSet: collections.abc.Set,       # use collections.abc; the bare aliases
            typing.Sequence: collections.abc.Sequence,     # were removed in Python 3.10
            typing.Mapping: collections.abc.Mapping,
        }
        instance_check = isinstance(data, imp_types[type_.__origin__]) and \
            all(isinstance(item, param_type[0]) for item in data)
    else:
        try:
            instance_check = isinstance(data, type_)
        except TypeError:
            if is_union_type(type_):
                instance_check = any(
                    isinstance(data, t) for t in get_union_types(type_)
                )
            else:
                raise ValueError('{!r} cannot be validated.'.format(type_))
    return instance_check
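Because iterating a mapping yields its keys, the Mapping branch above only checks key types. A hedged usage sketch, assuming the nirum helpers (get_abstract_param_types, is_union_type, get_union_types) are importable and an older typing module where Mapping[str, int].__origin__ is typing.Mapping:

import typing

print(validate_type({'a': 1, 'b': 2}, typing.Mapping[str, int]))  # True: dict is a Mapping and keys are str
print(validate_type(['a', 'b'], typing.Mapping[str, int]))        # False: a list is not a Mapping
print(validate_type('text', str))                                 # True: plain isinstance() path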
deserialize.py (project: nirum-python, author: spoqa)
def deserialize_abstract_type(cls, data):
    abstract_type_map = {
        typing.Sequence: list,
        typing.List: list,
        typing.Dict: dict,
        typing.Set: set,
        typing.AbstractSet: set,
    }
    cls_origin_type = cls.__origin__
    if cls_origin_type is None:
        cls_origin_type = cls
    iterable_types = {
        typing.Sequence, typing.List, typing.Tuple, typing.Set,
        typing.AbstractSet, typing.Mapping,
    }
    if cls_origin_type in iterable_types:
        return deserialize_iterable_abstract_type(cls, cls_origin_type, data)
    else:
        return abstract_type_map[cls_origin_type](data)
datavalue.py (project: wikidata, author: dahlia)
def __call__(self,
                 client: Client,
                 datatype: str,
                 datavalue: Mapping[str, object]) -> object:
        try:
            type_ = datavalue['type']
        except KeyError:
            raise DatavalueError('no "type" specified', datavalue)
        assert isinstance(type_, str)
        if 'value' not in datavalue:
            raise DatavalueError('no "value" field', datavalue)
        method_name = '{}__{}'.format(datatype, type_).replace('-', '_')
        method = getattr(self, method_name, None)
        if callable(method):
            return method(client, datavalue)
        method_name = type_.replace('-', '_')
        method = getattr(self, method_name, None)
        if callable(method):
            return method(client, datavalue)
        raise DatavalueError('{!r} is an unsupported type'.format(type_),
                             datavalue)
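The dispatcher builds a handler name from the snak's datatype and the datavalue's type, then falls back to a type-only name; the string construction itself is easy to check in isolation (using a real Wikidata datatype/type pair):

datatype = 'wikibase-item'
type_ = 'wikibase-entityid'

method_name = '{}__{}'.format(datatype, type_).replace('-', '_')
print(method_name)              # wikibase_item__wikibase_entityid

# fallback name when no datatype-specific handler exists
print(type_.replace('-', '_'))  # wikibase_entityid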
meta.py (project: auDeep, author: auDeep)
def label_map(self) -> Optional[Mapping[str, float]]:
        """
        Returns the mapping of nominal labels to numeric labels for the data set.

        This method delegates to the selected parser.

        Returns
        -------
        map of str to float
            The mapping of nominal labels to numeric labels

        Raises
        ------
        IOError
            If the data set cannot be parsed
        """
        if not self.can_parse():
            raise IOError("unable to parse data set at {}".format(self._basedir))

        return self._parser.label_map
facade.py (project: python-libjuju, author: juju)
def do_explode(self, kind):
        if kind in basic_types or type(kind) is typing.TypeVar:
            return False
        if not issubclass(kind, (typing.Sequence,
                                 typing.Mapping)):
            self.clear()
            self.extend(Args(kind))
            return True
        return False
multiple.py (project: Hanabi-AI, author: MeGotsThis)
def __init__(self,
                 username: str,
                 password: str,
                 botModule: str,
                 botconfig: Mapping,
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.username: str = username
        self.password: str = password
        module = importlib.import_module(botModule + '.bot')
        self.botCls: Type[Bot] = module.Bot  # type: ignore
        self.botconfig: Mapping = botconfig
        self.conn: socketIO_client.SocketIO
        self.game: Optional[Game] = None

