def test_add_another_offset(self):
topic_1 = uuidutils.generate_uuid()
partition_1 = random.randint(0, 1024)
until_offset_1 = random.randint(0, sys.maxsize)
from_offset_1 = random.randint(0, sys.maxsize)
app_name_1 = uuidutils.generate_uuid()
offset_key_1 = "%s_%s_%s" % (app_name_1, topic_1, partition_1)
my_batch_time = self.get_dummy_batch_time()
used_values = {}
self.kafka_offset_specs.add(topic=topic_1, partition=partition_1,
app_name=app_name_1,
from_offset=from_offset_1,
until_offset=until_offset_1,
batch_time_info=my_batch_time)
used_values[offset_key_1] = {
"topic": topic_1, "partition": partition_1, "app_name": app_name_1,
"from_offset": from_offset_1, "until_offset": until_offset_1
}
kafka_offset_specs = self.kafka_offset_specs.get_kafka_offsets(
app_name_1)
offset_value_1 = kafka_offset_specs.get(offset_key_1)
self.assertions_on_offset(used_value=used_values.get(offset_key_1),
offset_value=offset_value_1)
self.assertEqual(1,
len(self.kafka_offset_specs.get_kafka_offsets(
app_name_1)))
Python sys.maxsize() example source code
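All of the snippets below lean on the same property: sys.maxsize is the largest value a Py_ssize_t can hold, so it doubles as a platform word-size probe and as a stand-in for "infinity" in comparisons and defaults. A minimal standalone illustration (nothing here is taken from the snippets themselves):

import sys

print(sys.maxsize)                 # 2**63 - 1 on a 64-bit build, 2**31 - 1 on 32-bit
is_64bits = sys.maxsize > 2 ** 32  # the word-size check used repeatedly below
best = sys.maxsize                 # common "infinity" seed for minimization loops
for candidate in [7, 3, 9]:
    best = min(best, candidate)
assert best == 3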
def launch(self, cfg, path, flags):
logging.debug("Determine the OS and Architecture this application is currently running on")
hostOS = platform.system().lower()
logging.debug("hostOS: " + str(hostOS))
is_64bits = sys.maxsize > 2 ** 32
if is_64bits:
hostArchitecture = 'x64'
else:
hostArchitecture = 'ia32'
logging.debug("hostArchitecture: " + str(hostArchitecture))
    if self.validateConfig(cfg, hostOS, hostArchitecture):
fnull = open(os.devnull, 'w')
if os.environ.get("WPW_HOME") is not None:
cmd = [os.environ["WPW_HOME"] + '/bin/rpc-agent-' + platform.system().lower() + '-' + self.detectHostArchitecture()]
else:
cmd = [path + '/wpwithinpy/iot-core-component/bin/rpc-agent-' + platform.system().lower() + '-' + self.detectHostArchitecture()]
cmd.extend(flags)
proc = subprocess.Popen(cmd, stdin=None, stdout=fnull, stderr=subprocess.STDOUT)
return proc
else:
logging.debug("Invalid OS/Architecture combination detected")
def detectHostArchitecture(self):
"""Return the architecture as '386', 'amd64', 'arm32' or 'arm64'."""
out = ''
if platform.machine().lower()[:3] == 'arm':
out += 'arm'
if sys.maxsize > 2 ** 32:
if out == 'arm':
out += '64'
else:
out = 'amd64'
else:
if out == 'arm':
out += '32'
else:
out = '386'
return out
def prune(self, min_freq=5, max_size=sys.maxsize):
"""returns new Vocab object, pruned based on minimum symbol frequency"""
pruned_vocab = Vocab(unk=self.unk, emb=self.emb)
cnt = 0
for sym, freq in sorted(self.sym2freqs.items(), key=operator.itemgetter(1), reverse=True):
        cnt += 1
        if freq >= min_freq and cnt <= max_size:  # <= so the cap keeps max_size symbols, not max_size - 1
pruned_vocab(sym)
pruned_vocab.sym2freqs[sym] = freq
if self.frozen:
# if original Vocab was frozen, freeze new one
pruned_vocab.freeze()
return pruned_vocab
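A hedged usage sketch for the method above (the `vocab` instance is hypothetical; Vocab's constructor is not shown in this excerpt):

small_vocab = vocab.prune(min_freq=3, max_size=10000)  # keep at most the 10k most frequent symbols seen >= 3 times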
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
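The slicing branch above is a common pattern for lazily evaluated sequences: sys.maxsize substitutes for a missing slice stop so itertools.islice gets concrete bounds. A self-contained sketch of the same pattern (the Evens class is invented for illustration; negative indices and negative steps are omitted for brevity):

import itertools
import sys

class Evens:
    """Infinite lazy sequence sliced with the islice pattern above."""
    def __iter__(self):
        return itertools.count(0, 2)

    def __getitem__(self, item):
        if isinstance(item, slice):
            # sys.maxsize stands in for an omitted stop; avoid open-ended
            # slices of an infinite sequence, since list() would never finish
            return list(itertools.islice(iter(self),
                                         item.start or 0,
                                         item.stop or sys.maxsize,
                                         item.step or 1))
        gen = iter(self)
        for _ in range(item + 1):
            res = next(gen)
        return res

assert Evens()[3] == 6
assert Evens()[2:5] == [4, 6, 8]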
def detect_phantomjs(self):
logger.info('detecting phantomjs')
this_os = self.get_os().lower()
if 'windows' in this_os:
if os.path.isfile(self.home_dir + self.binary_win):
return self.home_dir + self.binary_win
else:
return False
elif 'linux' in this_os:
if sys.maxsize > 2 ** 32:
if os.path.isfile(self.home_dir + self.binary_linux64):
return self.home_dir + self.binary_linux64
else:
return False
else:
if os.path.isfile(self.home_dir + self.binary_linux32):
return self.home_dir + self.binary_linux32
else:
return False
else:
raise Exception('''
Platform not supported.
install phantomjs manually and update the path in your config
''')
def download(self):
logger.info('downloading phantomjs')
this_os = self.get_os().lower()
base_url = 'https://bitbucket.org/ariya/phantomjs/downloads/'
if 'windows' in this_os:
file_name = 'phantomjs-2.1.1-windows.zip'
archive = 'zip'
elif 'linux' in this_os:
archive = 'tar.bz2'
if sys.maxsize > 2 ** 32:
file_name = 'phantomjs-2.1.1-linux-x86_64.tar.bz2'
else:
file_name = 'phantomjs-2.1.1-linux-i686.tar.bz2'
else:
raise Exception('''
Platform not supported.
install phantomjs manually and update the path in your config
''')
# Download the file from `url` and save it locally under `file_name`:
urllib.request.urlretrieve(base_url + file_name, '/tmp/' + file_name)
self.unpack('/tmp/' + file_name, archive)
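A hedged sketch of how the two methods above compose (the `installer` instance and its surrounding class are assumed, not shown in this excerpt):

path = installer.detect_phantomjs()
if not path:
    installer.download()
    path = installer.detect_phantomjs()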
def typeoffsetof(self, BType, fieldname, num=0):
if isinstance(fieldname, str):
if num == 0 and issubclass(BType, CTypesGenericPtr):
BType = BType._BItem
if not issubclass(BType, CTypesBaseStructOrUnion):
raise TypeError("expected a struct or union ctype")
BField = BType._bfield_types[fieldname]
if BField is Ellipsis:
raise TypeError("not supported for bitfields")
return (BField, BType._offsetof(fieldname))
elif isinstance(fieldname, (int, long)):
if issubclass(BType, CTypesGenericArray):
BType = BType._CTPtr
if not issubclass(BType, CTypesGenericPtr):
raise TypeError("expected an array or ptr ctype")
BItem = BType._BItem
offset = BItem._get_size() * fieldname
if offset > sys.maxsize:
raise OverflowError
return (BItem, offset)
else:
raise TypeError(type(fieldname))
def _process_regex(cls, regex, rflags, state):
if isinstance(regex, words):
rex = regex_opt(regex.words, prefix=regex.prefix,
suffix=regex.suffix)
else:
rex = regex
compiled = re.compile(rex, rflags)
def match_func(text, pos, endpos=sys.maxsize):
info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
t0 = time.time()
res = compiled.match(text, pos, endpos)
t1 = time.time()
info[0] += 1
info[1] += t1 - t0
return res
return match_func
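The closure above is a timing wrapper: every match attempt bumps a hit counter and accumulates elapsed seconds, keyed by (state, regex). The same pattern stripped of the lexer machinery, as a standalone sketch (all names here are invented for illustration):

import re
import sys
import time

def profiled_matcher(pattern, prof_data):
    compiled = re.compile(pattern)
    def match(text, pos=0, endpos=sys.maxsize):
        info = prof_data.setdefault(pattern, [0, 0.0])  # [call count, total seconds]
        t0 = time.time()
        res = compiled.match(text, pos, endpos)
        info[0] += 1
        info[1] += time.time() - t0
        return res
    return match

prof = {}
m = profiled_matcher(r'\d+', prof)
m('42 apples')
assert prof[r'\d+'][0] == 1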
def training(self):
training_dir = self._training_dir
hyperparam_path = os.path.join(training_dir, 'hyperparams-config.yaml')
model_weights_path = os.path.join(training_dir, 'model-weights.hdf5')
config_builder = config.FileConfigBuilder(hyperparam_path)
config_dict = config_builder.build_config()._asdict()
if self._config_override:
config_dict.update(self._config_override)
config_dict['time_limit'] = parse_timedelta(
config_dict['time_limit'])
if 'epochs' in self._config_override:
config_dict['time_limit'] = None
elif 'time_limit' in self._config_override:
config_dict['epochs'] = None
conf = config.Config(**config_dict)
model_weights_path = (model_weights_path if self._load_model_weights
else None)
return Training(training_label=self._new_training_label,
conf=conf,
model_weights_path=model_weights_path,
log_metrics_period=self._log_metrics_period,
explode_patience=sys.maxsize)
def build_config(self):
return Config(dataset_name='flickr8k',
epochs=None,
time_limit=timedelta(hours=10),
batch_size=32,
# As near as possible to 1.0 but strictly below it; together with
# the maxsize patience this effectively disables LR reduction
reduce_lr_factor=1.0 - 1e-6,
reduce_lr_patience=sys.maxsize,
early_stopping_patience=sys.maxsize,
lemmatize_caption=True,
rare_words_handling='nothing',
words_min_occur=1,
learning_rate=0.001,
vocab_size=None,
embedding_size=300,
rnn_output_size=256,
dropout_rate=0.3,
bidirectional_rnn=False,
rnn_type='lstm',
rnn_layers=1,
l1_reg=0.0,
l2_reg=0.0,
initializer='glorot_uniform',
word_vector_init=None,
image_augmentation=False)
def build_config(self):
return Config(dataset_name='flickr8k',
epochs=None,
time_limit=timedelta(hours=24),
batch_size=32,
reduce_lr_factor=0.7,
reduce_lr_patience=4,
early_stopping_patience=sys.maxsize,
lemmatize_caption=True,
rare_words_handling='discard',
words_min_occur=5,
learning_rate=0.001,
vocab_size=None,
embedding_size=512,
rnn_output_size=512,
dropout_rate=0.3,
bidirectional_rnn=False,
rnn_type='lstm',
rnn_layers=1,
l1_reg=0.0,
l2_reg=0.0,
initializer='vinyals_uniform',
word_vector_init=None,
image_augmentation=False)
def __init__(self, fixed_config_keys):
super(CoarseRandomConfigBuilder, self).__init__(fixed_config_keys)
self._batch_size = lambda: 32
self._reduce_lr_factor = lambda: 1.0 - 1e-6
self._reduce_lr_patience = lambda: sys.maxsize
self._early_stopping_patience = lambda: 4
self._lemmatize_caption = lambda: True
self._rare_words_handling = lambda: 'nothing'
self._words_min_occur = lambda: 1
self._bidirectional_rnn = lambda: False
self._initializer = lambda: 'he_normal'
self._word_vector_init = lambda: None
self._learning_rate = lambda: 10 ** uniform(-6, -2)
self._dropout_rate = lambda: uniform(0, 1)
self._l1_reg = lambda: 10 ** uniform(-7, 0)
self._l2_reg = lambda: 10 ** uniform(-7, 0)
self._embedding_size = lambda: int(2 ** uniform(6, 9)) # [64, 512]
self._rnn_output_size = lambda: int(2 ** uniform(6, 9)) # [64, 512]
self._rnn_type = lambda: choice(['lstm', 'gru'])
self._rnn_layers = lambda: randint(1, 5)
def __init__(self, fixed_config_keys):
super(VinyalsRandomConfigBuilder, self).__init__(fixed_config_keys)
self._batch_size = lambda: 32
self._reduce_lr_factor = lambda: 1.0 - 1e-6
self._reduce_lr_patience = lambda: sys.maxsize
self._early_stopping_patience = lambda: 8
self._lemmatize_caption = lambda: True
self._rare_words_handling = lambda: 'discard'
self._words_min_occur = lambda: 5
self._bidirectional_rnn = lambda: False
self._initializer = lambda: 'vinyals_uniform'
self._word_vector_init = lambda: None
self._l1_reg = lambda: 0.0
self._l2_reg = lambda: 0.0
self._embedding_size = lambda: 512
self._rnn_output_size = lambda: 512
self._rnn_type = lambda: 'lstm'
self._rnn_layers = lambda: 1
self._learning_rate = lambda: 10**uniform(-4, -2)
self._dropout_rate = lambda: uniform(0.1, 0.6)
def __init__(self, fixed_config_keys):
super(Embed300RandomConfigBuilder, self).__init__(fixed_config_keys)
self._batch_size = lambda: 32
self._reduce_lr_factor = lambda: 1.0 - 1e-6
self._reduce_lr_patience = lambda: sys.maxsize
self._early_stopping_patience = lambda: 8
self._lemmatize_caption = lambda: True
self._rare_words_handling = lambda: 'discard'
self._words_min_occur = lambda: 5
self._bidirectional_rnn = lambda: False
self._initializer = lambda: 'glorot_uniform'
self._word_vector_init = lambda: choice(['glove', 'fasttext'])
self._l1_reg = lambda: 0.0
self._l2_reg = lambda: 0.0
self._embedding_size = lambda: 300
self._rnn_output_size = lambda: 300
self._rnn_type = lambda: 'lstm'
self._rnn_layers = lambda: randint(1, 2)
self._learning_rate = lambda: 10**uniform(-4, -2)
self._dropout_rate = lambda: uniform(0.1, 0.6)
def precisionbigmemtest(size, memuse, overhead=5*_1M):
def decorator(f):
def wrapper(self):
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if real_max_memuse and real_max_memuse < maxsize * memuse:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
return f(self, maxsize)
wrapper.size = size
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
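A hypothetical use of the decorator above (the test class, the _4G constant, and the body are invented; the decorator passes the possibly capped size into the test as an argument):

import unittest

_4G = 4 * 1024 ** 3

class StrRepeatTest(unittest.TestCase):
    @precisionbigmemtest(size=_4G, memuse=2)
    def test_repeat(self, size):
        # `size` may have been capped to 5147 when no memory limit is set
        s = '-' * size
        self.assertEqual(len(s), size)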
def test_array_type():
p = new_primitive_type("int")
assert repr(p) == "<ctype 'int'>"
#
py.test.raises(TypeError, new_array_type, new_pointer_type(p), "foo")
py.test.raises(ValueError, new_array_type, new_pointer_type(p), -42)
#
p1 = new_array_type(new_pointer_type(p), None)
assert repr(p1) == "<ctype 'int[]'>"
py.test.raises(ValueError, new_array_type, new_pointer_type(p1), 42)
#
p1 = new_array_type(new_pointer_type(p), 42)
p2 = new_array_type(new_pointer_type(p1), 25)
assert repr(p2) == "<ctype 'int[25][42]'>"
p2 = new_array_type(new_pointer_type(p1), None)
assert repr(p2) == "<ctype 'int[][42]'>"
#
py.test.raises(OverflowError,
new_array_type, new_pointer_type(p), sys.maxsize+1)
py.test.raises(OverflowError,
new_array_type, new_pointer_type(p), sys.maxsize // 3)
def test_struct_instance():
BInt = new_primitive_type("int")
BStruct = new_struct_type("struct foo")
BStructPtr = new_pointer_type(BStruct)
p = cast(BStructPtr, 0)
py.test.raises(AttributeError, "p.a1") # opaque
complete_struct_or_union(BStruct, [('a1', BInt, -1),
('a2', BInt, -1)])
p = newp(BStructPtr, None)
s = p[0]
assert s.a1 == 0
s.a2 = 123
assert s.a1 == 0
assert s.a2 == 123
py.test.raises(OverflowError, "s.a1 = sys.maxsize+1")
assert s.a1 == 0
py.test.raises(AttributeError, "p.foobar")
py.test.raises(AttributeError, "s.foobar")
def test_opaque_integer_as_function_result():
#import platform
#if platform.machine().startswith('sparc'):
# py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)')
#elif platform.machine() == 'mips64' and sys.maxsize > 2**32:
# py.test.skip('Segfaults on mips64el')
# XXX bad abuse of "struct { ...; }". It only works a bit by chance
# anyway. XXX think about something better :-(
ffi = FFI()
ffi.cdef("""
typedef struct { ...; } myhandle_t;
myhandle_t foo(void);
""")
lib = ffi.verify("""
typedef short myhandle_t;
myhandle_t foo(void) { return 42; }
""")
h = lib.foo()
assert ffi.sizeof(h) == ffi.sizeof("short")
def _sort_key(self):
"""Return a key for sorting SemanticVersion's on."""
# key things:
# - final is after rc's, so we make that a/b/rc/z
# - dev==None is after all other devs, so we use sys.maxsize there.
# - unqualified dev releases come before any pre-releases.
# So we do:
# (major, minor, patch) - gets the major grouping.
# (0|1) unqualified dev flag
# (a/b/rc/z) - release segment grouping
# pre-release level
# dev count, maxsize for releases.
rc_lookup = {'a': 'a', 'b': 'b', 'rc': 'rc', None: 'z'}
if self._dev_count and not self._prerelease_type:
uq_dev = 0
else:
uq_dev = 1
return (
self._major, self._minor, self._patch,
uq_dev,
rc_lookup[self._prerelease_type], self._prerelease,
self._dev_count or sys.maxsize)
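A standalone sketch of the key logic above, with the version object's attributes flattened into parameters (names are illustrative, not pbr's API):

import sys

def sort_key(major, minor, patch, prerelease_type=None, prerelease=0, dev_count=0):
    # Mirrors _sort_key: finals sort after rc's ('z' > 'rc'), and a zero
    # dev count maps to sys.maxsize so releases sort after dev builds.
    rc_lookup = {'a': 'a', 'b': 'b', 'rc': 'rc', None: 'z'}
    uq_dev = 0 if (dev_count and not prerelease_type) else 1
    return (major, minor, patch, uq_dev,
            rc_lookup[prerelease_type], prerelease,
            dev_count or sys.maxsize)

# 1.2.3.dev4 < 1.2.3rc1 < 1.2.3, as the comments above require:
assert sort_key(1, 2, 3, dev_count=4) < sort_key(1, 2, 3, 'rc', 1) < sort_key(1, 2, 3)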
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
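A hedged sketch of the setup() wiring this builder is normally paired with (CMakeExtension, the class name CMakeBuild, and the package metadata are all assumptions; only build_extension appears in the excerpt):

from setuptools import setup

setup(
    name='example',
    version='0.1.0',
    ext_modules=[CMakeExtension('example')],  # assumed companion Extension subclass
    cmdclass={'build_ext': CMakeBuild},       # assumed name of the builder above
    zip_safe=False,
)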
def _to_hsv(self, r, g, b):
h = 0
_sum = float(r + g + b)
_max = float(max([r, g, b]))
_min = float(min([r, g, b]))
diff = float(_max - _min)
if _sum == 0:
_sum = 0.0001
if _max == r:
if diff == 0:
h = sys.maxsize
else:
h = (g - b) / diff
    elif _max == g:
        h = 2 + ((b - r) / diff)  # standard hue formula uses (b - r) here, not (g - r)
else:
h = 4 + ((r - g) / diff)
h *= 60
if h < 0:
h += 360
return [h, 1.0 - (3.0 * (_min / _sum)), (1.0 / 3.0) * _max]
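Hypothetical sanity checks for the conversion above, assuming 8-bit RGB inputs and an instance `conv` of the surrounding class:

h, s, v = conv._to_hsv(255, 0, 0)   # pure red
assert h == 0 and s == 1.0
h, s, v = conv._to_hsv(0, 255, 0)   # pure green: hue 120 with the (b - r) fix above
assert h == 120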
def get_min_cut(cls, strs):
if not strs:
return 0
length = len(strs)
dp = [0 for _ in range(length+1)]
dp[length] = -1
is_palindrome = [[False for _ in range(length)] for _ in range(length)]
i = length - 1
while i >= 0:
dp[i] = sys.maxsize
j = i
while j < length:
if strs[i] == strs[j] and (j - i < 2 or is_palindrome[i+1][j-1]):
dp[i] = min([dp[i], dp[j+1]+1])
is_palindrome[i][j] = True
j += 1
i -= 1
return dp[0]
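Quick sanity checks for the palindrome min-cut DP above (the holder class `Solution` is hypothetical):

assert Solution.get_min_cut("aba") == 0   # already a palindrome, zero cuts
assert Solution.get_min_cut("aab") == 1   # "aa" | "b"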
def min_coins_count_2(cls, arr, aim):
if not arr or aim < 0 or len(arr) == 0:
return -1
n = len(arr)
max_val = sys.maxsize
dp = [0 for _ in range(aim + 1)]
for i in range(1, aim + 1):
dp[i] = max_val
if arr[0] <= i and dp[i - arr[0]] != max_val:
dp[i] = dp[i - arr[0]] + 1
for i in range(1, n):
for j in range(1, aim + 1):
left = max_val
if arr[i] <= j and dp[j - arr[i]] != max_val:
left = dp[j - arr[i]] + 1
dp[j] = min([left, dp[j]])
return dp[aim] if dp[aim] != max_val else -1
def min_coins_count_3(cls, arr, aim):
if not arr or len(arr) == 0 or aim < 0:
return -1
n = len(arr)
max_val = sys.maxsize
dp = [[0 for _ in range(aim + 1)] for _ in range(n)]
for i in range(1, aim + 1):
dp[0][i] = max_val
if arr[0] == i:
dp[0][i] = 1
for i in range(1, n):
for j in range(1, aim + 1):
            left = max_val
            if arr[i] <= j and dp[i - 1][j - arr[i]] != max_val:
                left = dp[i - 1][j - arr[i]] + 1  # previous row: each coin may be used at most once
dp[i][j] = min([left, dp[i - 1][j]])
return dp[n - 1][aim] if dp[n - 1][aim] != max_val else -1
def min_coins_count_4(cls, arr, aim):
if not arr or len(arr) == 0 or aim < 0:
return -1
n = len(arr)
max_val = sys.maxsize
dp = [0 for _ in range(aim + 1)]
    for i in range(1, aim + 1):
        dp[i] = max_val
        if arr[0] == i:  # base case: a single coin of the first denomination
            dp[i] = 1
    for i in range(1, n):
        for j in range(aim, 0, -1):  # iterate downwards so each coin is used at most once
left = max_val
if arr[i] <= j and dp[j - arr[i]] != max_val:
left = dp[j - arr[i]] + 1
dp[j] = min([left, dp[j]])
return dp[aim] if dp[aim] != max_val else -1
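Hedged checks tying the variants together (holder class hypothetical): _2 allows unlimited coins of each denomination, while _3 and _4, as fixed above, treat each listed coin as usable once:

assert Solution.min_coins_count_2([1, 2, 5], 11) == 3   # unlimited: 5 + 5 + 1
assert Solution.min_coins_count_3([1, 2, 5], 7) == 2    # each coin once: 2 + 5
assert Solution.min_coins_count_4([1, 2, 5], 7) == 2    # same answer, O(aim) space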
def setinterval(self, interval):
self.hilo = []
for s in interval.split(","):
if not s.strip():
continue
try:
v = int(s)
if v < 0:
lo, hi = 0, -v
else:
lo = hi = v
except ValueError:
i = s.find("-")
lo, hi = int(s[:i]), int(s[i+1:])
self.hilo.append((hi, lo))
if not self.hilo:
self.hilo = [(sys.maxsize, 0)]
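A worked example of the grammar above ("n" means exactly n, "-n" means 0 through n, "a-b" means a through b; pairs are stored as (hi, lo), and an empty spec falls back to (sys.maxsize, 0), i.e. match everything):

checker.setinterval("3,-5,7-9")  # `checker` is a hypothetical instance
assert checker.hilo == [(3, 3), (5, 0), (9, 7)]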