async def drawCard(self, game):
    # Draws a random unused white card, shuffling the deck first if needed.
    # Must be a coroutine ("async def") because it awaits bot.send_message below.
    totalDiscard = len(game['Discard'])
    for member in game['Members']:
        totalDiscard += len(member['Hand'])
    if totalDiscard >= len(self.deck['whiteCards']):
        # Tell every human player that the cards were shuffled
        for member in game['Members']:
            if member['IsBot']:
                continue
            user = member['User']
            await self.bot.send_message(user, 'Shuffling white cards...')
        # Shuffle the cards
        self.shuffle(game)
    while True:
        # Randomly pick indices until we find one not yet discarded
        index = random.randint(0, len(self.deck['whiteCards']) - 1)
        if index not in game['Discard']:
            game['Discard'].append(index)
            text = self.deck['whiteCards'][index]
            text = self.cleanJson(text)
            card = {'Index': index, 'Text': text}
            return card
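# drawCard() leans on a self.shuffle(game) helper that isn't shown here. A
# minimal sketch of what it presumably does, given that cards held in hands
# are counted as unavailable above (names and structure are assumptions, not
# the original implementation):
def shuffle(self, game):
    # Reset the discard pile so that only cards still held in players'
    # hands stay out of circulation.
    game['Discard'] = []
    for member in game['Members']:
        for card in member['Hand']:
            game['Discard'].append(card['Index'])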
def get_random_load_nonentry():
    '''
    Return a random item that probably isn't in match_func_result['load'].
    '''
    match_type = sys.argv[1]
    if match_type == 'ipasn':
        # Yes, we could do IPv6 here. But the type of the list doesn't matter:
        # a random IPv4 might not be in an IPv4 list, and it won't be in an
        # IPv6 list
        random_32_bit = random.randint(0, 2**32 - 1)
        ip = ipaddress.ip_address(random_32_bit)
        return ip
    else:
        char_list = list(get_random_load_entry())
        random.shuffle(char_list)
        return "".join(char_list)
def schedule_matches(self):
    mySchedule = []
    # for each home team, schedule a match against every other club
    for home in range(len(self.clubs)):
        for away in range(len(self.clubs)):
            if home == away:
                continue
            # schedule will be in order at first
            match = Match(self.clubs[home], self.clubs[away])
            mySchedule.append(match)
            # TODO: This needs major improvement.
            # Issue created on GitHub.
    # shuffle that schedule
    random.shuffle(mySchedule)
    # set current schedule to the new one
    self.schedule = mySchedule
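# The TODO above flags the naive all-pairs loop. A standard alternative is
# the circle method for round-robin scheduling, sketched here as a standalone
# helper under the assumption that clubs is a plain list (this is not the
# project's code):
def round_robin_rounds(clubs):
    # Circle method: fix the first team, rotate the rest; each round pairs
    # every team exactly once. Pads with None for a bye when the count is odd.
    teams = list(clubs)
    if len(teams) % 2:
        teams.append(None)  # bye
    n = len(teams)
    rounds = []
    for _ in range(n - 1):
        pairs = [(teams[i], teams[n - 1 - i]) for i in range(n // 2)
                 if teams[i] is not None and teams[n - 1 - i] is not None]
        rounds.append(pairs)
        teams.insert(1, teams.pop())  # rotate every slot except the first
    return rounds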
def hill_climbling_first_choice(status):
    '''First-choice hill climbing: generate successor states in random order
    and move to the first one with fewer conflicts than the current state.
    '''
    global chess_status_count
    pos = [(x, y) for x in range(8) for y in range(8)]
    random.shuffle(pos)
    for col, row in pos:
        if status[col] == row:
            continue
        chess_status_count += 1
        status_copy = list(status)
        status_copy[col] = row
        if get_num_of_conglict(status_copy) < get_num_of_conglict(status):
            status[col] = row
            return status
    return status
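# The snippet depends on get_num_of_conglict (sic) and chess_status_count,
# which are defined elsewhere. A minimal sketch of the conflict counter it
# presumably calls, assuming status[col] holds the row of the queen in that
# column (an eight-queens board representation inferred from the code above):
def get_num_of_conglict(status):
    # Count pairs of queens that attack each other: same row, or a shared
    # diagonal (equal row and column offsets). Same-column conflicts cannot
    # occur because each column holds exactly one queen.
    conflicts = 0
    n = len(status)
    for a in range(n):
        for b in range(a + 1, n):
            if status[a] == status[b] or abs(status[a] - status[b]) == b - a:
                conflicts += 1
    return conflicts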
def reproduce(n=None, dataset='RCT', rand_shuffle=None, num_it=3, split=None):
    """
    Read saved save_ss files and reproduce the evaluation.
    """
    filename = 'save_ss_' + dataset + ' ' + str(n) + '_' + str(rand_shuffle)
    f = open(filename, 'rb')  # binary mode for pickle
    (tc_dic, mv_dic, vs_diag_dic, vs_full_dic) = pickle.load(f)
    start.main(dataset)
    lc = crowd_model.labels_collection(start.turk_data_id, start.rel)
    gold_dic = lc.get_true_ss()
    # Shuffle both lists with the same constant "random" source so they stay aligned
    random.shuffle(start.turk_data_id, lambda: rand_shuffle)
    random.shuffle(start.rel, lambda: rand_shuffle)
    test_data = (start.turk_data_id[split:], start.rel[split:])
    print(n)
    print("tc ", eval_cm(tc_dic, gold_dic, True, test_data))
    print("mv ", eval_cm(mv_dic, gold_dic, True, test_data))
    print("vs Full_Cov = False ", eval_cm(vs_diag_dic, gold_dic, True, test_data))
    print("vs Full_Cov = True ", eval_cm(vs_full_dic, gold_dic, True, test_data))
    f.close()
def setup(dataset='proton-beam', n=1000, ngold=0, rand_shuffle=None):
    start.main(dataset)
    if rand_shuffle is not None:
        random.shuffle(start.turk_data_id, lambda: rand_shuffle)
        random.shuffle(start.rel, lambda: rand_shuffle)
    lc_gold = crowd_model.labels_collection(start.turk_data_id, start.rel)
    gold_dic = lc_gold.get_true_ss()
    lc1 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n - ngold) * [None])
    tc = crowd_model.tc_model(lc1)
    lc2 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n - ngold) * [None])
    mv = crowd_model.mv_model(lc2)
    lc3 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n - ngold) * [None])
    vs_full = crowd_model.vss_model(lc3, full_cov=True)
    lc4 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n - ngold) * [None])
    vs_diag = crowd_model.vss_model(lc4, full_cov=False)  # was lc3, which left lc4 unused
    return (gold_dic, mv, tc, vs_full, vs_diag)
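# Both functions above pass a constant-returning callable as the second
# argument of random.shuffle to get a deterministic permutation, so two
# parallel lists stay aligned. That argument was removed in Python 3.11;
# a sketch of the modern equivalent using a seeded random.Random instance
# (the seed value here is illustrative):
import random

def paired_shuffle(a, b, seed=42):
    # Two identically seeded generators yield the same permutation,
    # keeping the parallel lists aligned.
    random.Random(seed).shuffle(a)
    random.Random(seed).shuffle(b)

xs = [1, 2, 3, 4]
ys = ['a', 'b', 'c', 'd']
paired_shuffle(xs, ys)
# xs and ys are permuted the same way, e.g. [2, 4, 1, 3] and ['b', 'd', 'a', 'c']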
def get_balance_d():
    n = len(rel)
    a = np.arange(n)
    np.random.shuffle(a)
    n0 = 0; n1 = 0; indices = []
    for i in a:
        x = rel[i]
        # Skip items of whichever class is already ahead, so counts stay balanced
        if n0 < n1 and x == 1: continue
        if n1 < n0 and x == 0: continue
        indices.append(i)
        if x == 0: n0 += 1
        if x == 1: n1 += 1
    global bal_mat, bal_rel, bal_turk_data, bal_turk_data_uncer, bal_turk_data_id
    bal_mat = mat[indices]
    bal_rel = [rel[i] for i in indices]
    #bal_turk_data = [turk_data[i] for i in indices]
    #bal_turk_data_uncer = [turk_data_uncer[i] for i in indices]
    bal_turk_data_id = [turk_data_id[i] for i in indices]
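# The same downsampling idea as a self-contained helper, for clarity
# (hypothetical names, not the module's globals):
import numpy as np

def balanced_indices(labels, rng=np.random):
    # Walk the items in random order, never letting one class outrun the
    # other; returns indices of a roughly 50/50 subsample.
    order = np.arange(len(labels))
    rng.shuffle(order)
    counts = {0: 0, 1: 0}
    keep = []
    for i in order:
        x = labels[i]
        if counts[x] > counts[1 - x]:
            continue
        keep.append(i)
        counts[x] += 1
    return keep

print(balanced_indices([0, 0, 0, 1, 1, 0, 0, 1]))  # roughly equal 0s and 1s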
def read_wafdir():
    try:
        os.listdir('waflib')
    except OSError:
        raise ImportError('please provide a waflib directory in the current folder')
    d = 'waflib'
    lst = [d + os.sep + x for x in os.listdir(d) if x.endswith('.py')]
    e = d + os.sep + 'Tools'
    lst.extend([e + os.sep + x for x in os.listdir(e) if x.endswith('.py')])
    f = d + os.sep + 'extras'
    lst.extend([f + os.sep + x for x in os.listdir(f) if x.endswith('.py')])
    random.shuffle(lst)
    #lst.sort()
    return lst
def get_training_data(self, include_last_batch=False):
    """ Get shuffled training samples. Called at the beginning of each epoch.
    """
    # TODO: Speed up: Use variable size batches (different max length).
    train_ids = list(range(len(self.train_sents)))  # list() so it can be shuffled
    random.shuffle(train_ids)
    if not include_last_batch:
        # Drop the trailing partial batch so every batch has batch_size samples
        num_batches = len(train_ids) // self.batch_size
        train_ids = train_ids[:num_batches * self.batch_size]
    tensors = [self.train_tensors[t] for t in train_ids]
    num_samples = len(tensors)  # may be less than len(self.train_sents) after truncation
    batched_tensors = [tensors[i: min(i + self.batch_size, num_samples)]
                       for i in range(0, num_samples, self.batch_size)]
    results = [list(zip(*t)) for t in batched_tensors]
    print("Extracted {} samples and {} batches.".format(num_samples, len(batched_tensors)))
    return results
def makeQ(self):
    nums = [10, 50]  # array of percentages
    num1 = random.choice(nums)  # choose a random percentage
    nums2 = [10, 20, 40, 100]
    print()
    num2 = random.choice(nums2)
    q1 = "What is {0} percent of {1}?".format(num1, num2)  # question string
    options = [random.randint(0, 100) for _ in range(4)]  # four random distractors
    a1 = int((num1 / 100.0) * num2)  # num1 is the percentage, applied to num2
    options.append(a1)
    random.shuffle(options)
    print("Choose the correct answer: {0}".format(options))
    return q1, a1, options
def makeQ(self):
    nums = [10, 20, 40, 80]  # array of percentages
    num1 = random.choice(nums)  # choose a random percentage
    nums2 = [10, 20, 40, 100]
    print()
    num2 = random.choice(nums2)
    q1 = "What is {0} percent of {1}?".format(num1, num2)  # question string
    options = [random.randint(0, 100) for _ in range(4)]  # four random distractors
    a1 = int((num1 / 100.0) * num2)  # num1 is the percentage, applied to num2
    options.append(a1)
    random.shuffle(options)
    print("Choose the correct answer: {0}".format(options))
    return q1, a1, options
#Subclass of Monster class for geometry-related monsters
def makeQ(self):
    nums1 = [1, 2, 5, 10]  # array of numbers to multiply
    num1 = random.choice(nums1)  # choose a random multiplier
    nums2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    num2 = random.choice(nums2)
    q1 = "What is {0} multiplied by {1}?".format(num1, num2)  # question string
    a1 = num1 * num2  # the correct product
    options = [random.randint(1, 100) for _ in range(4)]  # four random distractors
    options.append(a1)
    random.shuffle(options)
    print("Choose the correct answer: {0}".format(options))
    return q1, a1, options
def makeQ(self):
    nums1 = [2, 3, 4, 5, 6, 7, 8, 9, 10]  # array of numbers to multiply
    num1 = random.choice(nums1)  # choose a random multiplier
    nums2 = [2, 3, 4, 5, 6, 7, 8, 9, 10]
    num2 = random.choice(nums2)
    q1 = "What is {0} multiplied by {1}?".format(num1, num2)  # question string
    a1 = num1 * num2  # the correct product
    options = [random.randint(1, 100) for _ in range(4)]  # four random distractors
    options.append(a1)
    random.shuffle(options)
    print("Choose the correct answer: {0}".format(options))
    return q1, a1, options
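# All four makeQ variants can generate a distractor equal to the correct
# answer (or to another distractor), so identical choices may appear. A
# hedged sketch of one way to guarantee distinct options (a standalone
# helper, not part of the original classes):
import random

def make_options(answer, count=5, low=0, high=100):
    # Collect distinct values, starting from the correct answer, then shuffle.
    options = {answer}
    while len(options) < count:
        options.add(random.randint(low, high))
    options = list(options)
    random.shuffle(options)
    return options

print(make_options(24))  # e.g. [7, 24, 81, 33, 58]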
def createData():
    spwords = [unidecode(a.lower()) for a in set(nltk.corpus.cess_esp.words()) if len(a) > 3]
    enwords = [a.lower() for a in set(nltk.corpus.brown.words()) if len(a) > 3]
    jpwords = [unidecode(a) for a in jeita.words() if (len(unidecode(a)) and unidecode(a)[0].islower())]
    jpwords = [a for a in set(jpwords) if len(a) > 3]
    # minLen = min(len(enwords), len(spwords), len(jpwords))
    featuresets = \
        [(createTupleDict(w, numChars), 'English') for w in enwords] + \
        [(createTupleDict(w, numChars), 'Spanish') for w in spwords] + \
        [(createTupleDict(w, numChars), 'Japanese') for w in jpwords]
    random.shuffle(featuresets)
    l = int(len(featuresets) * 0.8)  # 80/20 train/test split
    training_set = featuresets[:l]
    testing_set = featuresets[l:]
    return (training_set, testing_set)
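# These (features, label) pairs are shaped for NLTK's classifier API. A
# plausible consumer, assuming the surrounding module trains a Naive Bayes
# language guesser (the rest of that module isn't shown, so this is an
# assumption):
training_set, testing_set = createData()
classifier = nltk.NaiveBayesClassifier.train(training_set)
print("accuracy:", nltk.classify.accuracy(classifier, testing_set))
classifier.show_most_informative_features(10)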
def _split_train_tst(self):
    """
    Divide the data into training and testing sets.
    Creates X_trn and X_tst for both forward and backward reviews, plus Y_trn and Y_tst.
    Note that only the reviews are changed, and not the summary.
    :return: None
    """
    num_samples = self.Y.shape[0]
    mapper_file = self.checkpointer.get_mapper_file_location()
    if not self.checkpointer.is_mapper_checkpointed():
        print('No mapper checkpoint found. Fresh loading in progress ...')
        # Now shuffle the data
        sample_id = list(range(num_samples))
        random.shuffle(sample_id)
        print('Dumping the mapper shuffle for reuse.')
        Pickle.dump(sample_id, open(mapper_file, 'wb'))
        print('Dump complete. Moving forward ...')
    else:
        print('Mapper checkpoint found ... Reading from mapper dump')
        sample_id = Pickle.load(open(mapper_file, 'rb'))
        print('Mapper unpickling complete ... Moving forward ...')
    self.X_fwd = self.X_fwd[sample_id]
    self.X_bwd = self.X_bwd[sample_id]
    self.Y = self.Y[sample_id]
    # Now divide the data into test and train sets
    test_fraction = 0.01
    self.test_size = int(test_fraction * num_samples)
    self.train_size = num_samples - self.test_size
    # Forward review
    self.X_trn_fwd = self.X_fwd[0:self.train_size]
    self.X_tst_fwd = self.X_fwd[self.train_size:num_samples]
    # Backward review
    self.X_trn_bwd = self.X_bwd[0:self.train_size]
    self.X_tst_bwd = self.X_bwd[self.train_size:num_samples]
    # Summary
    self.Y_trn = self.Y[0:self.train_size]
    self.Y_tst = self.Y[self.train_size:num_samples]
def _split_train_tst(self):
    """
    Divide the data into training and testing sets.
    Creates X_trn, X_tst, Y_trn and Y_tst.
    Note that only the reviews are changed, and not the summary.
    :return: None
    """
    num_samples = self.Y.shape[0]
    mapper_file = self.checkpointer.get_mapper_file_location()
    if not self.checkpointer.is_mapper_checkpointed():
        print('No mapper checkpoint found. Fresh loading in progress ...')
        # Now shuffle the data
        sample_id = list(range(num_samples))
        random.shuffle(sample_id)
        print('Dumping the mapper shuffle for reuse.')
        Pickle.dump(sample_id, open(mapper_file, 'wb'))
        print('Dump complete. Moving forward ...')
    else:
        print('Mapper checkpoint found ... Reading from mapper dump')
        sample_id = Pickle.load(open(mapper_file, 'rb'))
        print('Mapper unpickling complete ... Moving forward ...')
    self.X = self.X[sample_id]
    self.Y = self.Y[sample_id]
    # Now divide the data into test and train sets
    test_fraction = 0.01
    self.test_size = int(test_fraction * num_samples)
    self.train_size = num_samples - self.test_size
    # Review
    self.X_trn = self.X[0:self.train_size]
    self.X_tst = self.X[self.train_size:num_samples]
    # Summary
    self.Y_trn = self.Y[0:self.train_size]
    self.Y_tst = self.Y[self.train_size:num_samples]
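# Both _split_train_tst variants shuffle an index list and reuse it across
# arrays; with NumPy already in play, numpy.random.permutation does the same
# in one call. A minimal standalone sketch of that pattern (the array names
# are illustrative, not the project's):
import numpy as np

def split_train_test(X, Y, test_fraction=0.01, seed=None):
    # One shared permutation keeps the X and Y rows aligned after shuffling.
    rng = np.random.RandomState(seed)
    order = rng.permutation(len(Y))
    X, Y = X[order], Y[order]
    test_size = int(test_fraction * len(Y))
    train_size = len(Y) - test_size
    return (X[:train_size], Y[:train_size]), (X[train_size:], Y[train_size:])

X = np.arange(20).reshape(10, 2)
Y = np.arange(10)
(train_X, train_Y), (test_X, test_Y) = split_train_test(X, Y, test_fraction=0.2, seed=0)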