This article collects typical usage examples of the util.chunks function in Python. If you have been wondering what chunks does, how to call it, or what real-world uses look like, the curated examples below should help.
Twenty code examples of the chunks function are shown, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Python code samples.
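Note that util.chunks is not part of the Python standard library: each project below ships its own helper, and the exact signature varies (several variants appear later on this page). As a baseline for reading the examples, here is a minimal sketch of the most common form, a fixed-size slicer; the name and behavior are assumptions drawn from how the examples call it:

def chunks(seq, size):
    """Yield successive size-element slices of seq; the last may be shorter."""
    for i in range(0, len(seq), size):
        yield seq[i:i + size]

print(list(chunks([1, 2, 3, 4, 5], 2)))  # [[1, 2], [3, 4], [5]]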
Example 1: generate
def generate(config, dnat=False, test=True):
    public_ip = config["public_ip"]
    current_ip = config["base_ip"]
    dnsmasq_content = ""
    for group in config["groups"].values():
        if not dnat:
            c = chunks([proxy["domain"] for proxy in group["proxies"]], 5)
        else:
            c = chunks([proxy["domain"] for proxy in group["proxies"] if proxy["dnat"]], 5)
        for chunk in c:
            if not dnat:
                dnsmasq_content += generate_dns(chunk, public_ip)
            else:
                dnsmasq_content += generate_dns(chunk, current_ip)
    if test:
        if not dnat:
            dnsmasq_content += generate_dns('ptest.verdandi.is', public_ip)
            dnsmasq_content += generate_dns('ptest2.verdandi.is', public_ip)
        else:
            dnsmasq_content += generate_dns('ptest.verdandi.is', current_ip)
            dnsmasq_content += generate_dns('ptest2.verdandi.is', current_ip)
    if dnat:
        for group in config["groups"].values():
            for proxy in group["proxies"]:
                if not proxy["dnat"]:
                    current_ip = long2ip(ip2long(current_ip) + 1)
                    dnsmasq_content += generate_dns(proxy["domain"], current_ip)
    return dnsmasq_content
Developer: carriercomm, Project: dnsproxy, Lines: 32, Source: dnsmasq.py
Example 2: train
def train(self, X_train, X_val):
    train_true = filter(lambda x: x[2]==1, X_train)
    train_false = filter(lambda x: x[2]==0, X_train)
    val_true = filter(lambda x: x[2]==1, X_val)
    val_false = filter(lambda x: x[2]==0, X_val)
    n_train_true = len(train_true)
    n_val_true = len(val_true)
    make_epoch_helper = functools.partial(make_epoch, train_true=train_true, train_false=train_false, val_true=val_true, val_false=val_false)
    logging.info("Starting training...")
    epoch_iterator = ParallelBatchIterator(make_epoch_helper, range(P.N_EPOCHS), ordered=False, batch_size=1, multiprocess=False, n_producers=1)
    for epoch_values in epoch_iterator:
        self.pre_epoch()
        train_epoch_data, val_epoch_data = epoch_values
        train_epoch_data = util.chunks(train_epoch_data, P.BATCH_SIZE_TRAIN)
        val_epoch_data = util.chunks(val_epoch_data, P.BATCH_SIZE_VALIDATION)
        self.do_batches(self.train_fn, train_epoch_data, self.train_metrics)
        self.do_batches(self.val_fn, val_epoch_data, self.val_metrics)
        self.post_epoch()
        logging.info("Setting learning rate to {}".format(P.LEARNING_RATE * ((0.985)**self.epoch)))
        self.l_r.set_value(P.LEARNING_RATE * ((0.985)**self.epoch))
Developer: gzuidhof, Project: luna16, Lines: 29, Source: fr3dnet_trainer.py
Example 3: collect_tweets_by_ids
def collect_tweets_by_ids(tweet_ids_config_filepath, output_folder, config):
    apikeys = list(config['apikeys'].values()).pop()
    tweet_ids_config = {}
    with open(os.path.abspath(tweet_ids_config_filepath), 'r') as tweet_ids_config_rf:
        tweet_ids_config = json.load(tweet_ids_config_rf)
    max_range = 100
    current_ix = tweet_ids_config['current_ix'] if ('current_ix' in tweet_ids_config) else 0
    total = len(tweet_ids_config['tweet_ids'][current_ix:])
    tweet_id_chuncks = util.chunks(tweet_ids_config['tweet_ids'][current_ix:], max_range)
    for tweet_ids in tweet_id_chuncks:
        try:
            twitterCralwer = TwitterCrawler(apikeys=apikeys, client_args=CLIENT_ARGS, output_folder=output_folder)
            twitterCralwer.lookup_tweets_by_ids(tweet_ids)
            current_ix += len(tweet_ids)
        except Exception as exc:
            logger.error(exc)
            # Ctrl+C is deliberately not handled: on restart the crawl resumes
            # from the last saved chunk, so a few duplicate tweets are acceptable.
            logger.error(util.full_stack())
            pass
        tweet_ids_config['current_ix'] = current_ix
        flash_cmd_config(tweet_ids_config, tweet_ids_config_filepath, output_folder)
        logger.info('COMPLETED -> (current_ix: [%d/%d])'%(current_ix, total))
        logger.info('PAUSE %ds to CONTINUE...'%WAIT_TIME)
        time.sleep(WAIT_TIME)
    else:
        logger.info('[tweets_by_ids] ALL COMPLETED')
Developer: bianjiang, Project: tweetf0rm, Lines: 35, Source: twitter_tracker.py
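Twitter's tweet-lookup endpoint accepts at most 100 IDs per request, which is why the code above batches the ID list with max_range = 100. A hypothetical, self-contained illustration of the resume logic, assuming the fixed-size chunks sketched at the top of this page:

tweet_ids = list(range(250))          # stand-in for real tweet IDs
current_ix = 100                      # offset loaded from the saved config
for batch in chunks(tweet_ids[current_ix:], 100):
    current_ix += len(batch)          # persisted after each batch, so a
                                      # restarted crawl resumes here
print(current_ix)                     # 250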
Example 4: decode
def decode(self, server, block_header, target, job_id=None, extranonce2=None):
    if block_header:
        job = Object()
        binary_data = block_header.decode('hex')
        data0 = np.zeros(64, np.uint32)
        data0 = np.insert(data0, [0] * 16, unpack('IIIIIIIIIIIIIIII', binary_data[:64]))
        job.target = np.array(unpack('IIIIIIII', target.decode('hex')), dtype=np.uint32)
        job.header = binary_data[:68]
        job.merkle_end = np.uint32(unpack('I', binary_data[64:68])[0])
        job.time = np.uint32(unpack('I', binary_data[68:72])[0])
        job.difficulty = np.uint32(unpack('I', binary_data[72:76])[0])
        job.state = sha256(STATE, data0)
        job.f = np.zeros(8, np.uint32)
        job.state2 = partial(job.state, job.merkle_end, job.time, job.difficulty, job.f)
        job.targetQ = 2**256 / int(''.join(list(chunks(target, 2))[::-1]), 16)
        job.job_id = job_id
        job.extranonce2 = extranonce2
        job.server = server
        calculateF(job.state, job.merkle_end, job.time, job.difficulty, job.f, job.state2)
        if job.difficulty != self.difficulty:
            self.set_difficulty(job.difficulty)
        return job
Developer: AngelMarc, Project: poclbm-1, Lines: 27, Source: Switch.py
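The int(''.join(list(chunks(target, 2))[::-1]), 16) idiom deserves a note: target is a little-endian hex string, so splitting it into two-character byte pairs and reversing the pair order yields big-endian hex that int(..., 16) can parse. A standalone demonstration with a made-up 4-byte target, using the fixed-size chunks sketched at the top of the page:

target = "ffff0000"                      # little-endian hex (illustrative value)
pairs = list(chunks(target, 2))          # ['ff', 'ff', '00', '00']
print(int(''.join(pairs[::-1]), 16))     # 65535, i.e. 0x0000ffff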
Example 5: decode
def decode(self, server, block_header, target, job_id=None, extranonce2=None):
    if block_header:
        job = Object()
        binary_data = block_header.decode('hex')
        #data0 = list(unpack('<16I', binary_data[:64])) + ([0] * 48)
        job.headerX = binary_data[:76]
        job.dataX = unpack('<19I', job.headerX)
        job.target = unpack('<8I', target.decode('hex'))
        job.header = binary_data[:68]
        job.merkle_end = uint32(unpack('<I', binary_data[64:68])[0])
        job.time = uint32(unpack('<I', binary_data[68:72])[0])
        job.difficulty = uint32(unpack('<I', binary_data[72:76])[0])
        # job.state = sha256(STATE, data0)
        job.targetQ = 2**256 / int(''.join(list(chunks(target, 2))[::-1]), 16)
        job.job_id = job_id
        job.extranonce2 = extranonce2
        job.server = server
        if job.difficulty != self.difficulty:
            self.set_difficulty(job.difficulty)
        return job
Developer: snoopcode, Project: poclbm-skc, Lines: 25, Source: Switch.py
Example 6: getstatusforfids
def getstatusforfids(self, fids):
    status = {}
    for chunk in chunks(fids, 50):
        for f in arlalow.fetchbulkstatus(self.fsconn, chunk):
            status[f["fid"]] = f["status"]
    return status
Developer: ahltorp, Project: afssync, Lines: 7, Source: afsutil.py
Example 7: extract_all_labels
def extract_all_labels(filenames, out_filepath=DATA_FOLDER+'labels.p', chunk_size=2000):
    print "EXTRACTING ALL LABELS INTO {0}".format(out_filepath)
    all_labels = []
    label_dict = {}
    filenames_chunks = util.chunks(filenames, chunk_size)
    for i, chunk in enumerate(filenames_chunks):
        pool = Pool(processes=util.CPU_COUNT)
        chunk_labels = pool.map(extract_labels, chunk)
        pool.close()
        for filepath, labels in zip(chunk, chunk_labels):
            if labels is not None:
                file_id = util.filename_without_extension(filepath)
                label_dict[file_id] = labels
                all_labels += labels
        print i+1, '/', len(filenames_chunks)
    # Write labels to file
    with open(out_filepath, 'w') as f:
        pickle.dump(label_dict, f)
    print '\nLabels:'
    print len(set(all_labels))
    print Counter(all_labels)
Developer: gzuidhof, Project: text-mining, Lines: 27, Source: extract.py
Example 8: __call__
def __call__(self, message, state=None, *, pad=True):
    state = state or self.initial_state
    prepared_message = message + (self.padding(len(message)) if pad else b"")
    assert len(prepared_message) % self.block_size == 0
    for block in chunks(prepared_message, self.block_size):
        state = self.compress(state, block)
    return state
Developer: mikez302, Project: cryptopals_solutions, Lines: 7, Source: merkle_damgard.py
Example 9: predict
def predict(self, data, modes):
    """Predict whether a list of positions follows a train route by detecting
    the nearest train stops. Input is the pandas data frame of
    measurements and an array of current mode predictions. Returns
    an array of predicted modes with the same size as the input data
    frame has rows.
    """
    # extract lat/lon from data frame
    lat = data['WLATITUDE'].values
    lon = data['WLONGITUDE'].values
    # chunk is a tuple (start_idx, end_idx, mode)
    for start_idx, end_idx, _ in ifilter(lambda chunk: chunk[2] in [MODE_CAR, MODE_BUS, MODE_TRAIN],
                                         chunks(modes, include_values=True)):
        # test for distance first
        lat_seg = lat[start_idx:end_idx]
        lon_seg = lon[start_idx:end_idx]
        valid_lat_seg = lat_seg[np.where(np.invert(np.isnan(lat_seg)))[0]]
        valid_lon_seg = lon_seg[np.where(np.invert(np.isnan(lon_seg)))[0]]
        if len(valid_lon_seg) == 0:
            continue
        # TODO: parameters have to be tuned carefully
        is_train = predict_mode_by_location(valid_lat_seg,
                                            valid_lon_seg,
                                            self.train_location_tree,
                                            self.train_location_dict,
                                            self.train_route_dict,
                                            dist_thre=400,
                                            dist_pass_thres=7,
                                            num_stops_thre=3,
                                            dist_pass_thres_perc=0.7)
        # check entry point distance
        entry_pt_near = -1
        exit_pt_near = -1
        if start_idx - 1 >= 0:
            if not np.isnan(lat[start_idx-1]):
                nearest_station = find_nearest_station(lat[start_idx-1], lon[start_idx-1], self.train_location_tree, self.dist_thres_entry_exit)
                if len(nearest_station) != 0:
                    entry_pt_near = 1
                else:
                    entry_pt_near = 0
        if end_idx < len(modes):
            if not np.isnan(lat[end_idx]):
                nearest_station = find_nearest_station(lat[end_idx], lon[end_idx],
                                                       self.train_location_tree,
                                                       self.dist_thres_entry_exit)
                if len(nearest_station) != 0:
                    exit_pt_near = 1
                else:
                    exit_pt_near = 0
        if is_train or entry_pt_near + exit_pt_near == 2:
            modes[start_idx:end_idx] = MODE_TRAIN
        else:
            modes[start_idx:end_idx] = MODE_CAR
    return modes
Developer: SUTDMEC, Project: NSE_Validation, Lines: 60, Source: TransitHeuristic.py
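Note the different signature in this example: chunks(modes, include_values=True) does run-length grouping over consecutive equal values rather than fixed-size slicing, yielding (start_idx, end_idx, value) tuples. The project's implementation is not shown here; a sketch of what such a variant plausibly looks like, inferred from how the tuples are consumed above:

def chunks(values, include_values=False):
    """Yield (start, end) index pairs for runs of consecutive equal values,
    appending the run's value when include_values is True."""
    start = 0
    for i in range(1, len(values) + 1):
        if i == len(values) or values[i] != values[start]:
            yield (start, i, values[start]) if include_values else (start, i)
            start = i

print(list(chunks([3, 3, 5, 5, 5, 3], include_values=True)))
# [(0, 2, 3), (2, 5, 5), (5, 6, 3)]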
Example 10: crack_ecb_oracle
def crack_ecb_oracle(oracle_fn, prefix_length=0):
    block_size = guess_block_size(oracle_fn)
    if not looks_like_ecb(oracle_fn(b"A" * 100), block_size):
        raise ValueError("oracle_fn does not appear to produce ECB mode output")
    result = bytearray()
    while True:
        short_block_length = (block_size - len(result) - 1 - prefix_length) % block_size
        short_input_block = b"A" * short_block_length
        block_index = (len(result) + prefix_length) // block_size
        block_to_look_for = chunks(oracle_fn(short_input_block))[block_index]
        for guess in all_bytes_by_frequency:
            test_input = short_input_block + result + bytes([guess])
            if chunks(oracle_fn(test_input))[block_index] == block_to_look_for:
                result.append(guess)
                break
        else:  # if no byte matches
            return pkcs7_unpad(result)
Developer: mikez302, Project: cryptopals_solutions, Lines: 17, Source: block_tools.py
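Two details here differ from the generator sketch at the top of the page: chunks is called with no size argument, and its result is indexed (chunks(...)[block_index]), so this project's helper must return a list and presumably defaults to the 16-byte AES block size. A sketch under those assumptions:

def chunks(data, size=16):
    """Split data into a list of size-byte blocks (a list, so it is indexable)."""
    return [data[i:i + size] for i in range(0, len(data), size)]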
Example 11: cluster_to_kml
def cluster_to_kml(user, cluster, cluster_id):
    """
    Creates one or possibly several KML files for a given cluster.
    MyMaps limits a KML file to 10 layers, so each KML file holds at
    most 10 sections.
    It is the caller's responsibility to check the existence and
    formatting of the cluster.
    """
    Sections = get_section_db()
    for i, chunk in enumerate(chunks(cluster, 10)):
        sections = map(lambda section_id: Sections.find_one({'_id': section_id}), chunk)
        sections_to_kml("%s_cluster_data_kml/CLUSTER_%s_%i" % (user, str(cluster_id), i), sections)
Developer: sfwatergit, Project: e-mission-server, Lines: 12, Source: truth_pipeline.py
Example 12: add_text
def add_text(self, text):
    if len(text) + len(self._lines[self.point[0]]) > self.draw_width:
        self.point_to_next_line()
    if len(text) > self.draw_width:
        lines_to_add = chunks(text, self.draw_width)
        lines_to_advance = len(lines_to_add)
        for line in lines_to_add:
            self._lines.append(line)
        self.adjust_point_by_lines(lines_to_advance)
    else:
        self._lines[self.point[0]] += text
        self.point_to_end_of_line()
Developer: chazu, Project: jinxes, Lines: 12, Source: buffers.py
Example 13: cross_validation
def cross_validation(self, fold, epoch):
    print 'doing cross validation...'
    splited_data = list(chunks(self.data, fold))
    hyper_test = defaultdict(int)
    for idx, (train, test) in enumerate(splited_data):
        for c in self.C:
            for rho_0 in self.RHO_0:
                weight = self.train(train, rho_0, c, epoch=epoch)
                precision = self.test(test, weight)
                print 'done fold %i' % idx, ' on [rho_0: %s, c: %s]' \
                    % (rho_0, c)
                hyper_test[(rho_0, c)] += precision
    return map(lambda (x, y): (x, y/fold), hyper_test.iteritems())
Developer: drstarry, Project: minimal, Lines: 13, Source: svm.py
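The unpacking for idx, (train, test) in enumerate(splited_data) implies yet another variant: here chunks(self.data, fold) must yield one (train, test) pair per fold rather than plain slices. A hypothetical implementation consistent with that usage, not the project's actual code:

def chunks(data, fold):
    """Yield (train, test) splits for fold-way cross validation (assumed)."""
    size = len(data) // fold
    for i in range(fold):
        test = data[i * size:(i + 1) * size]
        train = data[:i * size] + data[(i + 1) * size:]
        yield train, test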
Example 14: start_producers
def start_producers(self, result_queue):
    jobs = Queue()
    n_workers = params.N_PRODUCERS
    batch_count = 0
    # Flag used for keeping values in queue in order
    last_queued_job = Value('i', -1)
    for job_index, batch in enumerate(util.chunks(self.X, self.batch_size)):
        batch_count += 1
        jobs.put((job_index, batch))

    # Define producer (putting items into queue)
    def produce(id):
        while True:
            job_index, task = jobs.get()
            if task is None:
                #print id, " fully done!"
                break
            result = self.gen(task)
            while True:
                # My turn to add the finished job to the result queue
                if last_queued_job.value == job_index - 1:
                    with last_queued_job.get_lock():
                        result_queue.put(result)
                        last_queued_job.value += 1
                    #print id, " worker PUT", job_index
                    break

    # Start workers
    for i in xrange(n_workers):
        if params.MULTIPROCESS:
            p = Process(target=produce, args=(i,))
        else:
            p = Thread(target=produce, args=(i,))
        p.daemon = True
        p.start()

    # Add poison pills to queue (to signal workers to stop)
    for i in xrange(n_workers):
        jobs.put((-1, None))

    return batch_count, jobs
Developer: StevenReitsma, Project: kaggle-diabetic-retinopathy, Lines: 50, Source: iterators.py
Example 15: threshold_optimization
def threshold_optimization(p, y):
    print "Optimizing threshold"
    y_images = util.chunks(y, 384*512)

    def dice_objective(threshold):
        p_binary = np.where(p > threshold, 1, 0)
        p_images_binary = util.chunks(p_binary, 384*512)
        mean, std, dices = dice(p_images_binary, y_images)
        return -mean

    x, v, message = scipy.optimize.fmin_l_bfgs_b(dice_objective, 0.5, approx_grad=True, bounds=[(0, 1)], epsilon=1e-03)
    print "Optimized, threshold {0}, objective {1}, termination because {2}".format(x, v, message)
    return x[0]
Developer: gzuidhof, Project: cad, Lines: 14, Source: learn.py
Example 16: profile
def profile(subset=1000, multi=True, n_threads=4, batch_size=64, thread_pool=False):
    # Load a bunch of image names
    y = util.load_labels()
    y = y[:subset]
    keys = y.index.values
    # Create sublists (batches)
    batched_keys = util.chunks(keys, batch_size)
    if multi:
        augment_multithreaded(batched_keys, n_threads=n_threads, thread_pool=thread_pool)
    else:
        augment_singlethreaded(batched_keys)
Developer: StevenReitsma, Project: kaggle-diabetic-retinopathy, Lines: 14, Source: multithread_augment.py
Example 17: call
def call(self, orderlist):
    assert isinstance(orderlist, list)
    orders = {}
    MAXORDERS = 50
    for ol in util.chunks(orderlist, MAXORDERS):
        # make BDAQ representation of orders from the orderlist passed
        self.req.Orders.Order = self.makeorderlist(ol)
        apilog.info('calling BDAQ Api PlaceOrdersNoReceipt')
        result = self.client.service.PlaceOrdersNoReceipt(self.req)
        ors = apiparse.ParsePlaceOrdersNoReceipt(result, orderlist)
        orders.update(ors)
        # note: could put result.Timestamp in the order object so that we
        # are saving the BDAQ time.
    return orders
Developer: Susheng, Project: pybetdaq, Lines: 15, Source: apimethod.py
Example 18: refresh_job
def refresh_job(self, j):
    j.extranonce2 = self.increment_nonce(j.extranonce2)
    coinbase = j.coinbase1 + self.extranonce + j.extranonce2 + j.coinbase2
    merkle_root = sha256(sha256(unhexlify(coinbase)).digest()).digest()
    for hash_ in j.merkle_branch:
        merkle_root = sha256(sha256(merkle_root + unhexlify(hash_)).digest()).digest()
    merkle_root_reversed = ''
    for word in chunks(merkle_root, 4):
        merkle_root_reversed += word[::-1]
    merkle_root = hexlify(merkle_root_reversed)
    j.block_header = ''.join([j.version, j.prevhash, merkle_root, j.ntime, j.nbits])
    j.time = time()
    return j
Developer: AngelMarc, Project: poclbm, Lines: 15, Source: StratumSource.py
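The word loop above flips each 4-byte word of the merkle root into the byte order the Stratum block header expects. A standalone illustration of the same swap, assuming the fixed-size chunks sketched at the top of the page and written in Python 3 bytes for clarity:

digest = b"\x01\x02\x03\x04\x05\x06\x07\x08"
swapped = b"".join(word[::-1] for word in chunks(digest, 4))
print(swapped)  # b'\x04\x03\x02\x01\x08\x07\x06\x05'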
Example 19: status_iter
def status_iter(iterable, callback, chunksize=1, reportsize=10):
    itersize = len(iterable)
    starttime = time.time()
    for i, item in enumerate(util.chunks(iterable, chunksize), 1):
        callback(item)
        if i % reportsize == 0:
            done = i * chunksize
            nowtime = time.time()
            numblocks = itersize * 1.0 / (reportsize*chunksize)
            curblock = done / (reportsize*chunksize)
            position = curblock / numblocks
            duration = round(nowtime - starttime)
            durdelta = datetime.timedelta(seconds=duration)
            remaining = round((duration / position) - duration)
            remdelta = datetime.timedelta(seconds=remaining)
            lookuplog.info("Done %s/%s in %s; %s remaining", done, itersize, str(durdelta), str(remdelta))
    lookuplog.info("Finished")
Developer: MTG, Project: echonest-backup, Lines: 17, Source: lookup.py
Example 20: nfold_cross_validate
def nfold_cross_validate(data, n=4):
    data_chunks = chunks(data, len(data) / n)
    rmse_values = []
    for i in range(n):
        train_set = flatten(data_chunks[:i] + data_chunks[i + 1:])
        test_set = data_chunks[i]
        classif = nltk.MaxentClassifier.train(train_set)
        test_fs, test_ratings = zip(*test_set)
        results = classif.batch_classify(test_fs)
        set_rmse = rmse(test_ratings, results)
        print 'RMSE: ', set_rmse
        rmse_values.append(set_rmse)
    print 'Average RMSE:', sum(rmse_values) / float(len(rmse_values))
Developer: slyngbaek, Project: wikiometer, Lines: 17, Source: classifier.py
Note: the util.chunks examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Consult each project's license before redistributing or using the code; do not reproduce without permission.