def update_testbench(self, lib, cell, parameters, sim_envs, config_rules, env_parameters):
    # type: (str, str, Dict[str, str], Sequence[str], List[List[str]], List[List[Tuple[str, str]]]) -> None
    """Update the given testbench configuration.

    NOTE(review): the body is empty -- presumably an abstract hook meant to be
    implemented by a database-specific subclass; confirm against callers.

    Parameters
    ----------
    lib : str
        testbench library.
    cell : str
        testbench cell.
    parameters : Dict[str, str]
        testbench parameters.
    sim_envs : Sequence[str]
        list of enabled simulation environments.
    config_rules : List[List[str]]
        config view mapping rules, list of (lib, cell, view) rules.
    env_parameters : List[List[Tuple[str, str]]]
        list of param/value list for each simulation environment.
    """
    pass
# Python examples for the Sequence() class
def instantiate_layout(self, lib_name, view_name, via_tech, layout_list):
    # type: (str, str, str, Sequence[Any]) -> None
    """Create a batch of layouts.

    NOTE(review): the body is empty -- presumably an abstract hook meant to be
    implemented by a database-specific subclass; confirm against callers.

    Parameters
    ----------
    lib_name : str
        layout library name.
    view_name : str
        layout view name.
    via_tech : str
        via technology library name.
    layout_list : Sequence[Any]
        a list of layouts to create
    """
    pass
def create_dut_layouts(self, lay_params_list, cell_name_list, temp_db):
    # type: (Sequence[Dict[str, Any]], Sequence[str], TemplateDB) -> Sequence[Dict[str, Any]]
    """Create one layout per parameter set and return their schematic parameters.

    The layout generator class is resolved dynamically from the
    ``layout_package``/``layout_class`` entries of ``self.specs``.
    """
    if self.prj is None:
        raise ValueError('BagProject instance is not given.')

    # Resolve the layout generator class named in the specs.
    gen_module = importlib.import_module(self.specs['layout_package'])
    gen_cls = getattr(gen_module, self.specs['layout_class'])

    templates = [temp_db.new_template(params=params, temp_cls=gen_cls, debug=False)
                 for params in lay_params_list]
    sch_params_list = [tmpl.sch_params for tmpl in templates]
    temp_db.batch_layout(self.prj, templates, cell_name_list)
    return sch_params_list
def get_cells_in_library(self, lib_name):
    # type: (str) -> Sequence[str]
    """Return the names of all cells in the given library.

    Parameters
    ----------
    lib_name : str
        the library name.

    Returns
    -------
    cell_list : Sequence[str]
        a list of cells in the library; empty if the library does not exist.
    """
    db = self.impl_db
    if db is None:
        raise Exception('BAG Server is not set up.')
    return db.get_cells_in_library(lib_name)
def instantiate_schematic(self, lib_name, content_list, lib_path=''):
    # type: (str, Sequence[Any], str) -> None
    """Create the given schematic contents in the CAD database.

    NOTE: this is BAG's internal method.  To create schematics, call
    batch_schematic() instead.

    Parameters
    ----------
    lib_name : str
        name of the new library to put the schematic instances.
    content_list : Sequence[Any]
        list of schematics to create.
    lib_path : str
        the path to create the library in.  If empty, use default location.
    """
    db = self.impl_db
    if db is None:
        raise Exception('BAG Server is not set up.')
    db.instantiate_schematic(lib_name, content_list, lib_path=lib_path)
def instantiate_layout(self, lib_name, view_name, via_tech, layout_list):
    # type: (str, str, str, Sequence[Any]) -> None
    """Create a batch of layouts in the CAD database.

    Parameters
    ----------
    lib_name : str
        layout library name.
    view_name : str
        layout view name.
    via_tech : str
        via technology name.
    layout_list : Sequence[Any]
        a list of layouts to create
    """
    db = self.impl_db
    if db is None:
        raise Exception('BAG Server is not set up.')
    db.instantiate_layout(lib_name, view_name, via_tech, layout_list)
def from_vocab(cls, sequences: Map[int, Seq[H]], vocab: Vocabulary, max_len: int, pack_sequences: bool=False,
               append_eos: bool=True, eos_token: Opt[H]=DEFAULT_EOS, null_token: H=DEFAULT_NULL,
               int_id_type: str='long', shuffle: bool=True):
    """Build a dataset from an existing Vocabulary.

    :param vocab: instance of Vocabulary to use for encoding/decoding tokens
    :param max_len: maximum length of sequences to sample
    :param pack_sequences: bool indicating whether to return regular Tensors or PackedSequence instances.
    :param append_eos: whether to append eos_token when encoding
    :param eos_token: string or hashable to append to mark end-of-sequence in encoding
    :param null_token: Optional hashable to use for padding sequences.  Added to the vocab, unless none is
        passed and none is built, in which case this is considered to be an int id.
    :param int_id_type: string indicating the type of int ids to use.  Must be a key of
        data.str_to_int_tensor_type.  Numpy aliases for integer types are valid, as well as 'long',
        'short', 'byte', 'char'.  The default 'long' is recommended, as only LongTensors can be used to
        index Embeddings in pytorch.
    :param shuffle: whether to shuffle the sampled sequences
    """
    enc = SequenceTensorEncoder(
        vocab,
        append_eos=append_eos,
        eos_token=eos_token,
        null_token=null_token,
        int_id_type=int_id_type,
    )
    return cls(
        sequences=sequences,
        encoder=enc,
        max_len=max_len,
        pack_sequences=pack_sequences,
        null_token=null_token,
        shuffle=shuffle,
    )
def from_id2token(cls, sequences: Map[int, Seq[H]], id2token: Dict[H, int],
                  max_len: int, pack_sequences: bool=False,
                  append_eos: bool=True, eos_token: Opt[H]=DEFAULT_EOS,
                  null_token: H=DEFAULT_NULL, oov_token: H=DEFAULT_OOV,
                  int_id_type: str='long', shuffle: bool=True):
    """Build a dataset from a raw id-to-token mapping.

    :param id2token: mapping of int ids to tokens
    :param max_len: maximum length of sequences to sample
    :param pack_sequences: bool indicating whether to return regular Tensors or PackedSequence instances.
    :param append_eos: whether to append eos_token when encoding
    :param eos_token: hashable to append to mark end-of-sequence in encoding
    :param null_token: hashable to use for padding sequences.  Added to the vocab, unless none is passed
        and none is built, in which case this is considered to be an int id.
    :param oov_token: hashable to insert for out-of-vocab tokens when encoding
    :param int_id_type: string indicating the type of int ids to use.  Must be a key of
        data.str_to_int_tensor_type.  Numpy aliases for integer types are valid, as well as 'long',
        'short', 'byte', 'char'.  The default 'long' is recommended, as only LongTensors can be used to
        index Embeddings in pytorch.
    :param shuffle: whether to shuffle the sampled sequences
    """
    built_vocab = Vocabulary.from_id2token(id2token, oov_token=oov_token)
    enc = SequenceTensorEncoder(
        built_vocab,
        append_eos=append_eos,
        eos_token=eos_token,
        null_token=null_token,
        int_id_type=int_id_type,
    )
    return cls(
        sequences=sequences,
        encoder=enc,
        max_len=max_len,
        pack_sequences=pack_sequences,
        null_token=null_token,
        shuffle=shuffle,
    )
def assign_data(centroids: Sequence[Centroid], data: Iterable[Point]) -> Dict[Centroid, Sequence[Point]]:
    """Group every data point under its nearest centroid."""
    grouped: DefaultDict[Point, List[Point]] = defaultdict(list)
    for pt in data:
        nearest: Point = min(centroids, key=partial(dist, pt))
        grouped[nearest].append(pt)
    return dict(grouped)
def compute_centroids(groups: Iterable[Sequence[Point]]) -> List[Centroid]:
    """Compute the centroid (coordinate-wise mean) of each group."""
    centroids: List[Centroid] = []
    for group in groups:
        centroids.append(tuple(mean(axis) for axis in transpose(group)))
    return centroids
def quality(labeled: Dict[Centroid, Sequence[Point]]) -> float:
    """Mean squared distance from each data point to its assigned centroid."""
    squared_dists = (dist(centroid, point) ** 2
                     for centroid, points in labeled.items()
                     for point in points)
    return mean(squared_dists)
def do_explode(self, kind):
    """Replace this argument list with the expanded arguments of *kind*.

    Returns True when the expansion happened; basic types, TypeVars and
    Sequence/Mapping containers are left untouched (False).
    """
    if kind in basic_types or type(kind) is typing.TypeVar:
        return False
    if issubclass(kind, (typing.Sequence, typing.Mapping)):
        # Container types keep their own structure; nothing to explode.
        return False
    self.clear()
    self.extend(Args(kind))
    return True
def ReturnMapping(cls):
    # Annotate the method with a return Type
    # so the value can be cast
    # Decorator factory: wraps an async RPC call so the raw JSON reply is
    # converted into an instance (or a list of instances) of *cls*.
    def decorator(f):
        @functools.wraps(f)
        async def wrapper(*args, **kwargs):
            nonlocal cls
            reply = await f(*args, **kwargs)
            # No target type registered: hand back the raw reply untouched.
            if cls is None:
                return reply
            # An error reply is decoded with the Error class instead.
            if 'error' in reply:
                cls = CLASSES['Error']
            if issubclass(cls, typing.Sequence):
                # Sequence type: decode every item with the element type.
                result = []
                item_cls = cls.__parameters__[0]
                for item in reply:
                    result.append(item_cls.from_json(item))
                    # NOTE(review): the string literal below is commented-out
                    # code kept in place; it has no runtime effect.
                    """
                    if 'error' in item:
                        cls = CLASSES['Error']
                    else:
                        cls = item_cls
                    result.append(cls.from_json(item))
                    """
            else:
                # Scalar type: decode the 'response' payload directly.
                result = cls.from_json(reply['response'])
            return result
        return wrapper
    return decorator
def buildArray(self, obj, d=0):
    # Map a schema "array" node to a typing.Sequence type, recursing into
    # nested arrays; *d* tracks the recursion depth.
    if "$ref" in obj:
        return Sequence[refType(obj)]
    if obj.get("type") == "array":
        return self.buildArray(obj['items'], d + 1)
    return Sequence[objType(obj)]
def iterchars(text):
    # type: (str) -> Sequence[str]
    # A Python str already iterates over its characters, so it can be
    # returned unchanged as a character sequence.
    return text
def make_execall(exe, cmdargs, stdindata=None, depfiles=()):
    # type: (str, Sequence[str], Optional[bytes], Sequence[str]) -> ExeCall
    """Build an ExeCall record describing a subprocess invocation.

    If the result of a subprocess call depends on the content of files, their
    filenames must be specified as depfiles to prevent stale cache results.
    """
    call = ExeCall(exe, cmdargs, stdindata, depfiles)
    return call
def iter_parallel_report(func,        # type: Callable[..., Any]
                         args_lists,  # type: Sequence[CallArgs]
                         ccmode=CC_PROCESSES):
    # type: (...) -> Iterator[Union[ExeResult, ExcInfo]]
    # Apply func to every (args, kwargs) pair, either serially or through a
    # thread/process pool, yielding each result in submission order.
    # Exceptions raised by a call are yielded as ExcInfo wrappers instead of
    # propagating, so one failing call does not abort the whole batch.
    if ccmode == CC_OFF or len(args_lists) <= 1 or not multiprocessing:
        # Serial fallback: no pool overhead for a single call or when
        # concurrency is disabled/unavailable.
        for args, kwargs in args_lists:
            yield func(*args, **kwargs)
        return
    processes = min(len(args_lists), multiprocessing.cpu_count())
    if ccmode == CC_THREADS:
        pool = multiprocessing.pool.ThreadPool(processes=processes)
    else:
        pool = multiprocessing.Pool(processes=processes, initializer=per_process_init)
    try:
        async_results = [pool.apply_async(func, args=args, kwds=kwargs)
                         for args, kwargs in args_lists]
        pool.close()
        while async_results:
            try:
                asyncres = async_results.pop(0)
                yield asyncres.get()
            except (KeyboardInterrupt, GeneratorExit):
                # Cancellation must propagate to the outer handlers below.
                raise
            except Exception as e:
                t, v, tb = sys.exc_info()
                try:
                    # Report the textual traceback of the subprocess rather
                    # than this local exception which was triggered
                    # by the other side.
                    tb = e.traceback  # type: ignore
                except AttributeError:
                    pass
                yield ExcInfo((t, v, tb))
    except GeneratorExit:
        # Consumer stopped iterating early: kill outstanding workers.
        pool.terminate()
    except KeyboardInterrupt:
        pool.terminate()
        raise
    finally:
        pool.join()
def iter_parallel(func,        # type: Callable
                  args_lists,  # type: Sequence[CallArgs]
                  ccmode=CC_PROCESSES):
    # type: (...) -> Iterator[Any]
    # Parallel-map wrapper around iter_parallel_report that re-raises any
    # exception which occurred in a worker thread/process in the caller.
    if not args_lists:
        return
    if ccmode != CC_OFF:
        # Wrap each call so worker-side tracebacks travel back (possibly as
        # text when crossing a process boundary).
        args_lists = [((func, args, kwargs), {}) for args, kwargs in args_lists]
        wrappedfunc = tracebackwrapper
    else:
        wrappedfunc = func
    for result in iter_parallel_report(wrappedfunc, args_lists, ccmode=ccmode):
        if ccmode == CC_OFF:
            yield result
        else:
            tbtext = None
            try:
                if isinstance(result, ExcInfo):
                    t, v, tb = result.exc_info
                    if not isinstance(tb, types.TracebackType):
                        # The traceback arrived as text from another process;
                        # it cannot be re-raised directly.
                        tbtext = tb
                        tb = None
                    reraise(t, v, tb)
                else:
                    yield result
            except Exception:
                if tbtext is not None:
                    # Surface the remote textual traceback to the caller.
                    raise Exception(tbtext)
                else:
                    traceback.print_exc()
                    raise
# ----------------------------------------------------------------------
# The data types option and style.
def identify_language(self, filenames=(), language=None):
    # type: (Sequence[str], Optional[str]) -> None
    """Record the language(s), inferring them from filename extensions
    when no explicit language is given.
    """
    if language is not None:
        self.languages.append(language)
        return
    exts = {os.path.splitext(name)[1] for name in filenames}
    for lang, extsdescription in UncrustifyFormatter.language_exts:
        langexts = set(extsdescription.split())
        if exts.issubset(langexts):
            self.languages.append(lang)
def attempt_acceptible(self, roundnr, prevdist, newdist):
    # type: (int, Sequence[int], Sequence[int]) -> bool
    """Decide whether a new distance distribution may be accepted.

    From round 3 onward an attempt must strictly improve on the previous
    distribution: a lexicographically equal or larger distance tuple is
    rejected.  (The original code tested ``>`` and ``>=`` in two separate
    branches; the ``>=`` test subsumes the ``>`` one, so a single
    comparison is sufficient and behavior-identical.)
    """
    if roundnr >= 3 and tuple(newdist) >= tuple(prevdist):
        # Equal or worse than before -> not an improvement, reject.
        return False
    return True