def __init__(self, gamma, cache=Cache, normalizator=Normalizator, cache_subproducts=False):
    """Store gamma and instantiate the pluggable cache/normalization strategies.

    Args:
        gamma: opaque value kept on the instance — semantics not visible here.
        cache: cache *class*; instantiated with this object so the cache can
            call back into it (default: ``Cache``).
        normalizator: normalization strategy, stored as given (default:
            ``Normalizator`` — NOTE(review): stored as the class itself, not an
            instance; confirm callers expect that asymmetry with ``cache``).
        cache_subproducts: flag controlling whether intermediate products are
            cached — presumably consumed elsewhere; not used in this block.
    """
    self.gamma = gamma
    # The cache receives `self`, so keep this after gamma is set in case the
    # cache constructor reads instance state.
    self.cache = cache(self)
    self.normalizator = normalizator
    self.cache_subproducts = cache_subproducts
# Example source code for instances of the Python Cache() class (collected snippets)
def testCacheWithPrefix(self):
    """with_prefix must namespace keys onto the underlying ram cache."""
    storage = Storage({'application': 'admin',
                       'folder': 'applications/admin'})
    cache = Cache(storage)
    prefixed = cache.with_prefix(cache.ram, 'prefix')
    # First call computes and stores 1; the cached value then wins over
    # the replacement lambda.
    self.assertEqual(prefixed('a', lambda: 1, 0), 1)
    self.assertEqual(prefixed('a', lambda: 2, 100), 1)
    # The same entry is reachable on the raw ram cache under the prefixed key.
    self.assertEqual(cache.ram('prefixa', lambda: 2, 100), 1)
def testDALcache(self):
    """ram- and disk-cached selects must all return identical rowsets."""
    storage = Storage({'application': 'admin',
                       'folder': 'applications/admin'})
    cache = Cache(storage)
    db = DAL(check_reserved=['all'])
    db.define_table('t_a', Field('f_a'))
    db.t_a.insert(f_a='test')
    db.commit()
    # cacheable rowsets, ram backend
    ram_first = db(db.t_a.id > 0).select(cache=(cache.ram, 60), cacheable=True)
    ram_second = db(db.t_a.id > 0).select(cache=(cache.ram, 60), cacheable=True)
    self.assertEqual(ram_first.as_csv(), ram_second.as_csv())
    # cacheable rowsets, disk backend
    disk_first = db(db.t_a.id > 0).select(cache=(cache.disk, 60), cacheable=True)
    disk_second = db(db.t_a.id > 0).select(cache=(cache.disk, 60), cacheable=True)
    self.assertEqual(disk_first.as_csv(), disk_second.as_csv())
    # backends must agree with each other
    self.assertEqual(ram_first.as_csv(), disk_first.as_csv())
    self.assertEqual(ram_second.as_csv(), disk_second.as_csv())
    # non-cacheable variants must still produce the same CSV
    plain_disk_a = db(db.t_a.id > 0).select(cache=(cache.disk, 60))
    plain_disk_b = db(db.t_a.id > 0).select(cache=(cache.disk, 60))
    self.assertEqual(plain_disk_a.as_csv(), plain_disk_b.as_csv())
    self.assertEqual(ram_first.as_csv(), plain_disk_b.as_csv())
    plain_ram_a = db(db.t_a.id > 0).select(cache=(cache.ram, 60))
    plain_ram_b = db(db.t_a.id > 0).select(cache=(cache.ram, 60))
    self.assertEqual(plain_ram_a.as_csv(), plain_ram_b.as_csv())
    self.assertEqual(ram_first.as_csv(), plain_ram_b.as_csv())
    db.t_a.drop()
    db.close()
def test_CacheWithPrefix(self):
    """A prefixed wrapper shares its storage with the wrapped ram cache."""
    env = Storage({'application': 'admin',
                   'folder': 'applications/admin'})
    cache = Cache(env)
    wrapped = cache.with_prefix(cache.ram, 'prefix')
    self.assertEqual(wrapped('a', lambda: 1, 0), 1)
    # Second call must hit the cached value rather than the new lambda.
    self.assertEqual(wrapped('a', lambda: 2, 100), 1)
    # Addressing the raw cache with the prefixed key yields the same entry.
    self.assertEqual(cache.ram('prefixa', lambda: 2, 100), 1)
def test_DALcache(self):
    """Cached selects (ram/disk, cacheable or not) must agree on content."""
    env = Storage({'application': 'admin',
                   'folder': 'applications/admin'})
    cache = Cache(env)
    db = DAL(check_reserved=['all'])
    db.define_table('t_a', Field('f_a'))
    db.t_a.insert(f_a='test')
    db.commit()

    def fetch(model, **extra):
        # One place to express the common select-with-cache call.
        return db(db.t_a.id > 0).select(cache=(model, 60), **extra)

    ram_one = fetch(cache.ram, cacheable=True)
    ram_two = fetch(cache.ram, cacheable=True)
    self.assertEqual(ram_one.as_csv(), ram_two.as_csv())
    disk_one = fetch(cache.disk, cacheable=True)
    disk_two = fetch(cache.disk, cacheable=True)
    self.assertEqual(disk_one.as_csv(), disk_two.as_csv())
    self.assertEqual(ram_one.as_csv(), disk_one.as_csv())
    self.assertEqual(ram_two.as_csv(), disk_two.as_csv())
    plain_disk_one = fetch(cache.disk)
    plain_disk_two = fetch(cache.disk)
    self.assertEqual(plain_disk_one.as_csv(), plain_disk_two.as_csv())
    self.assertEqual(ram_one.as_csv(), plain_disk_two.as_csv())
    plain_ram_one = fetch(cache.ram)
    plain_ram_two = fetch(cache.ram)
    self.assertEqual(plain_ram_one.as_csv(), plain_ram_two.as_csv())
    self.assertEqual(ram_one.as_csv(), plain_ram_two.as_csv())
    db.t_a.drop()
    db.close()
def test_cache(self):
    """A cached function keeps returning its first computed result."""
    @cache.Cache()
    def timestamp():
        return time.time()

    first = timestamp()
    # The second call must come from the cache, not a fresh time.time().
    self.assertEqual(first, timestamp())
def test_cache_passthrough(self):
    """Distinct arguments must map to distinct cache entries."""
    @cache.Cache()
    def timestamp(value):
        return time.time()

    cached_for_zero = timestamp(0)
    # A different argument misses the cache and recomputes the timestamp.
    self.assertNotEqual(cached_for_zero, timestamp(1))
def test_cache_timeout(self):
    """Entries older than the timeout must be recomputed."""
    @cache.Cache(timeout=1)
    def timestamp(value):
        return time.time()

    stale = timestamp(0)
    # Outlive the 1-second timeout so the next call recomputes.
    time.sleep(2)
    self.assertNotEqual(stale, timestamp(0))
def import_data(self, head, rows):
    """Import checkin rows, recording who/when in an importactions entry.

    Args:
        head: dict providing at least 'sender_addr' and 'sender_user'.
        rows: iterable of dicts with the checkin fields read below
            ('type', 'ci_when', 'who', 'repository', 'dir', 'file',
            'revision', 'branch', 'addedlines', 'removedlines',
            'description', 'commitid').

    Fix: the cursor and connection are now released in a ``finally`` block;
    previously any failing execute leaked both.
    """
    self.connect()
    self.cache = Cache()
    cursor = self.conn.cursor()
    try:
        # Bookkeeping row: who triggered the import and when.
        sql = """INSERT INTO importactions (remote_addr, remote_user, sender_addr, sender_user, ia_when) VALUES (%s, %s, %s, %s, %s)"""
        cursor.execute(sql, [
            environ.get("REMOTE_ADDR", ""), environ.get("REMOTE_USER", ""),
            head["sender_addr"], head["sender_user"],
            datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
        ])
        importactionid = cursor.lastrowid
        # First pass: warm the id cache for every mapped column so the
        # checkin inserts below can resolve foreign ids from self.cache.
        for row in rows:
            for key in self.column_table_mapping:
                self.fill_id_cache(cursor, key, row, row[key])
        # Second pass: insert the checkins themselves.
        for row in rows:
            sql = """INSERT IGNORE INTO checkins(type, ci_when, whoid, repositoryid, dirid, fileid, revision, branchid, addedlines, removedlines, descid, stickytag, commitid, importactionid)
VALUE (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
            cursor.execute(self.rewrite_sql(sql), [
                row["type"],
                row["ci_when"],
                self.cache.get("who", row["who"]),
                self.cache.get("repository", row["repository"]),
                self.cache.get("dir", row["dir"]),
                self.cache.get("file", row["file"]),
                row["revision"],
                self.cache.get("branch", row["branch"]),
                row["addedlines"],
                row["removedlines"],
                self.cache.get("description", row["description"]),
                "",  # stickytag is always stored empty here
                self.cache.get("hash", row["commitid"]),
                str(importactionid)
            ])
    finally:
        # Always release DB resources, even when an insert raises.
        cursor.close()
        self.disconnect()
def main():
    """Build and run the simulators described by the pipes config file.

    Fix: ``conf`` was only bound inside ``if args.conf:``, so running without
    a conf file raised NameError at ``conf.iteritems()``. It now defaults to
    an empty ordered mapping (no pipes -> no simulators).
    """
    try:
        conf = collections.OrderedDict()
        if args.conf:
            log.info("Reading conf from {}".format(args.conf))
            with open(args.conf, 'r') as pipes_file:
                conf = json.load(pipes_file,
                                 object_pairs_hook=collections.OrderedDict)
        simulators = []
        cache = Cache(args.db)
        admin_user = cache["users"]["admin"]
        admin_user["auth_url"] = cache["api"]["auth_url"]
        admin_factory = ClientFactory(admin_user)
        admin_keeper = Keeper(cache, admin_factory)
        if args.clean:
            log.info("Starting cleanup")
            admin_keeper.clean(args.clean)
            sys.exit()
        # This section for default initialization of cirros image
        log.debug("Caching default cirros image")
        (cache["glance"]["images"]
         [admin_keeper.get(
             "glance", "images", "name",
             lambda x: x == "cirros-0.3.4-x86_64-uec")[0].id]) = False
        for flavor in admin_factory.nova().flavors.list():
            log.debug("Caching flavor with name {name}".
                      format(name=flavor.name))
            (cache["nova"]["flavors"][flavor.id]) = False
        # NOTE(review): iteritems() pins this to Python 2, consistent with
        # the rest of this code; switch to items() on a py3 migration.
        for pipe_name, pipe in conf.iteritems():
            simulators.append(Simulator(pipe_name, pipe, cache, admin_keeper))
        for simulator in simulators:
            simulator.simulate()
    except KeyboardInterrupt:
        print('\nThe process was interrupted by the user')
        raise SystemExit
def testCacheWithPrefix(self):
    """Prefixed cache keys land in the wrapped ram cache under the prefix."""
    request_env = Storage({'application': 'admin',
                           'folder': 'applications/admin'})
    cache = Cache(request_env)
    via_prefix = cache.with_prefix(cache.ram, 'prefix')
    # Store 1 under key 'a'; subsequent lookups return the cached value.
    self.assertEqual(via_prefix('a', lambda: 1, 0), 1)
    self.assertEqual(via_prefix('a', lambda: 2, 100), 1)
    # 'prefix' + 'a' is the real key on the underlying cache.
    self.assertEqual(cache.ram('prefixa', lambda: 2, 100), 1)
def testDALcache(self):
    """Every cached-select combination must serialize to the same CSV."""
    request_env = Storage({'application': 'admin',
                           'folder': 'applications/admin'})
    cache = Cache(request_env)
    db = DAL(check_reserved=['all'])
    db.define_table('t_a', Field('f_a'))
    db.t_a.insert(f_a='test')
    db.commit()
    everything = db.t_a.id > 0
    # cacheable + ram
    rows_a = db(everything).select(cache=(cache.ram, 60), cacheable=True)
    rows_b = db(everything).select(cache=(cache.ram, 60), cacheable=True)
    self.assertEqual(rows_a.as_csv(), rows_b.as_csv())
    # cacheable + disk
    rows_c = db(everything).select(cache=(cache.disk, 60), cacheable=True)
    rows_d = db(everything).select(cache=(cache.disk, 60), cacheable=True)
    self.assertEqual(rows_c.as_csv(), rows_d.as_csv())
    self.assertEqual(rows_a.as_csv(), rows_c.as_csv())
    self.assertEqual(rows_b.as_csv(), rows_d.as_csv())
    # plain (non-cacheable) + disk
    rows_e = db(everything).select(cache=(cache.disk, 60))
    rows_f = db(everything).select(cache=(cache.disk, 60))
    self.assertEqual(rows_e.as_csv(), rows_f.as_csv())
    self.assertEqual(rows_a.as_csv(), rows_f.as_csv())
    # plain (non-cacheable) + ram
    rows_g = db(everything).select(cache=(cache.ram, 60))
    rows_h = db(everything).select(cache=(cache.ram, 60))
    self.assertEqual(rows_g.as_csv(), rows_h.as_csv())
    self.assertEqual(rows_a.as_csv(), rows_h.as_csv())
    db.t_a.drop()
    db.close()
def init(_args, _root):
    """Populate the module-level singletons the rest of the app relies on.

    Args:
        _args: parsed CLI arguments; ``_args.clean`` controls whether the
            database and simulation start from a clean state.
        _root: root object stored as-is in the module-global ``root``.
    """
    global args, db, postCache, commentCache, messageCache, userNotifCache
    global root, sim, conf
    args = _args
    db = Database(clean=args.clean)
    # One independent cache per content kind.
    postCache = Cache()
    commentCache = Cache()
    messageCache = Cache()
    userNotifCache = Cache()
    root = _root
    sim = simulation.Simulation(clean=args.clean)
    conf = {"startingPoints": 0}