This article collects typical usage examples of the util.elapsed function in Python. If you have been wondering what exactly the elapsed function does, how to call it, or what real-world uses of it look like, the hand-picked code examples below should help.
A total of 20 code examples of the elapsed function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site surface better Python code examples.
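Before diving in, note that util.elapsed itself is never shown on this page, and the snippets come from several different projects, so the signature is not uniform: the Impactstory-style examples pass a time() timestamp (and sometimes a rounding precision) and expect the number of elapsed seconds back, while the network-model examples (3, 4, 6, 8, 9) pass a message string and use the call as a logging checkpoint. The sketch below is inferred purely from those call sites; every name, default value, and output format in it is an assumption, not the projects' actual util.py.

from time import time

# Timestamp-style variant (assumed from calls like elapsed(start) and elapsed(start, 2)):
# returns the wall-clock seconds since `since`, rounded to `round_places` decimals.
def elapsed(since, round_places=2):
    return round(time() - since, round_places)

# Companion helper seen in Example 2 (also assumed): util.now() appears to be
# nothing more than a timestamp to pass back into elapsed() later.
def now():
    return time()

# Message-style variant used by the network-model examples (assumed, and renamed
# here so both variants can live in one sketch): log a message together with the
# time since the previous checkpoint.
_last_checkpoint = time()

def elapsed_checkpoint(message):
    global _last_checkpoint
    current = time()
    print('%s (%.2f s since last checkpoint)' % (message, current - _last_checkpoint))
    _last_checkpoint = current

Whatever the variant, the pattern in every example is the same: record a timestamp before the work (start = time() or start = util.now()), do the work, then report elapsed(start); in the checkpoint style, call the function with a short status message between stages instead.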
Example 1: gets_a_pdf
def gets_a_pdf(link, base_url):

    if is_purchase_link(link):
        return False

    absolute_url = get_link_target(link, base_url)
    start = time()
    with closing(requests.get(absolute_url, stream=True, timeout=5, verify=False)) as r:

        if resp_is_pdf(r):
            print u"http header says this is a PDF. took {}s from {}".format(elapsed(start), absolute_url)
            return True

        # some publishers send a pdf back wrapped in an HTML page using frames.
        # this is where we detect that, using each publisher's idiosyncratic templates.
        # we only check based on a whitelist of publishers, because downloading this whole
        # page (r.content) is expensive to do for everyone.
        if 'onlinelibrary.wiley.com' in absolute_url:
            # = closed journal http://doi.org/10.1111/ele.12585
            # = open journal http://doi.org/10.1111/ele.12587
            if '<iframe' in r.content:
                print u"this is a Wiley 'enhanced PDF' page. took {}s".format(elapsed(start))
                return True

        elif 'ieeexplore' in absolute_url:
            # (this is a good example of one dissem.in misses)
            # = open journal http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6740844
            # = closed journal http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6045214
            if '<frame' in r.content:
                print u"this is a IEEE 'enhanced PDF' page. took {}s".format(elapsed(start))
                return True

    print u"we've decided this ain't a PDF. took {}s".format(elapsed(start))
    return False
Developer: pombredanne, Project: sherlockoa, Lines: 33, Source: article.py
Example 2: run_ica
def run_ica():
    log('loading data')
    start = util.now()
    voxels, xdim, ydim, zdim = load_data()
    log(' elapsed: {}'.format(util.elapsed(start)))

    log('running independent component analysis')
    start = util.now()
    ica = decomposition.FastICA(n_components=64, max_iter=200)
    sources = ica.fit_transform(voxels)
    sources = to_dataframe(sources, load_subject_ids(), ['X{}'.format(i) for i in range(64)])
    log(' elapsed: {}'.format(util.elapsed(start)))

    log('calculating correlations between voxel and component time courses')
    start = util.now()
    correlations = []
    for voxel in voxels.columns[:32]:
        voxel = voxels[voxel]
        max_correlation = 0
        for source in sources.columns:
            source = sources[source]
            # np.corrcoef returns a 2x2 matrix, so take the off-diagonal entry
            # as the correlation coefficient between the two time courses
            correlation = np.corrcoef(voxel, source)[0, 1]
            if correlation > max_correlation:
                max_correlation = correlation
        correlations.append(max_correlation)
    log(' elapsed: {}'.format(util.elapsed(start)))
Developer: rbrecheisen, Project: scripts, Lines: 27, Source: run_ica.py
Example 3: build_granules
def build_granules(model):
    '''build granules'''
    model.granules = {}
    for gid in model.granule_gids:
        g = mkgranule(gid)
        model.granules.update({gid : g})
    elapsed('%d granules built'%int(pc.allreduce(len(model.granules),1)))
Developer: JustasB, Project: Mig3DTest, Lines: 7, Source: net_mitral_centric.py
Example 4: mk_mitrals
def mk_mitrals(model):
    ''' Create all the mitrals specified by mitral_gids set.'''
    model.mitrals = {}
    for gid in model.mitral_gids:
        m = mkmitral.mkmitral(gid)
        model.mitrals.update({gid : m})
    util.elapsed('%d mitrals created and connections to mitrals determined'%int(pc.allreduce(len(model.mitrals),1)))
Developer: JustasB, Project: Mig3DTest, Lines: 7, Source: determine_connections.py
Example 5: load_campaign
def load_campaign(filename, campaign=None, limit=None):
    with open("data/" + filename, "r") as f:
        lines = f.read().split("\n")

    print "found {} ORCID lines".format(len(lines))
    print len(lines)

    if limit:
        lines = lines[:limit]

    total_start = time()
    row_num = 0
    for line in lines:
        row_num += 1

        # can have # as comments
        if line.startswith("#"):
            print "skipping comment line"
            continue

        loop_start = time()
        email = None
        twitter = None  # make sure twitter is bound even when the line has no email/twitter columns
        if "," in line:
            (dirty_orcid, email, twitter) = line.split(",")
        else:
            dirty_orcid = line

        try:
            orcid_id = clean_orcid(dirty_orcid)
        except NoOrcidException:
            try:
                print u"\n\nWARNING: no valid orcid_id in line {}; skipping\n\n".format(line)
            except UnicodeDecodeError:
                print u"\n\nWARNING: no valid orcid_id and line throws UnicodeDecodeError; skipping\n\n"
            continue

        my_person = Person.query.filter_by(orcid_id=orcid_id).first()
        if my_person:
            print u"row {}, already have person {}, skipping".format(row_num, orcid_id)
        else:
            print u"row {}, making person {}".format(row_num, orcid_id)
            my_person = make_person(orcid_id, store_in_db=True)

        my_person.campaign = campaign
        my_person.email = email
        my_person.twitter = twitter
        db.session.merge(my_person)
        commit_success = safe_commit(db)
        if not commit_success:
            print u"COMMIT fail on {}".format(my_person.orcid_id)
        print "row {}: finished {} in {}s\n".format(row_num, orcid_id, elapsed(loop_start))

    print "finished load_campaign on {} profiles in {}s\n".format(len(lines), elapsed(total_start))
Developer: Impactstory, Project: impactstory-tng, Lines: 56, Source: load_campaign.py
Example 6: register_mitrals
def register_mitrals(model):
    '''register mitrals'''
    for gid in model.mitrals:
        if h.section_exists("initialseg", model.mitrals[gid]):
            s = model.mitrals[gid].initialseg
            pc.set_gid2node(gid, rank)
            pc.cell(gid, h.NetCon(s(1)._ref_v, None, sec=s))
            if not mpiece_exists(gid): # must not be doing multisplit
                wholemitral(gid, model.mitrals[gid])
    elapsed('mitrals registered')
Developer: JustasB, Project: Mig3DTest, Lines: 10, Source: net_mitral_centric.py
Example 7: update_fn
def update_fn(self, cls, method_name, objects, index=1):

    # we are in a fork! dispose of our engine.
    # will get a new one automatically
    # if is pooling, need to do .dispose() instead
    db.engine.dispose()

    start = time()
    num_obj_rows = len(objects)

    # logger.info(u"{pid} {repr}.{method_name}() got {num_obj_rows} objects in {elapsed} seconds".format(
    #     pid=os.getpid(),
    #     repr=cls.__name__,
    #     method_name=method_name,
    #     num_obj_rows=num_obj_rows,
    #     elapsed=elapsed(start)
    # ))

    for count, obj in enumerate(objects):
        start_time = time()

        if obj is None:
            return None

        method_to_run = getattr(obj, method_name)

        # logger.info(u"***")
        logger.info(u"*** #{count} starting {repr}.{method_name}() method".format(
            count=count + (num_obj_rows*index),
            repr=obj,
            method_name=method_name
        ))

        method_to_run()

        logger.info(u"finished {repr}.{method_name}(). took {elapsed} seconds".format(
            repr=obj,
            method_name=method_name,
            elapsed=elapsed(start_time, 4)
        ))

        # for handling the queue
        if not (method_name == "update" and obj.__class__.__name__ == "Pub"):
            obj.finished = datetime.datetime.utcnow().isoformat()
        # db.session.merge(obj)

    start_time = time()
    commit_success = safe_commit(db)
    if not commit_success:
        logger.info(u"COMMIT fail")
    logger.info(u"commit took {} seconds".format(elapsed(start_time, 2)))
    db.session.remove()  # close connection nicely
    return None  # important for if we use this on RQ
Developer: Impactstory, Project: sherlockoa, Lines: 54, Source: queue_main.py
Example 8: build_net_round_robin
def build_net_round_robin(model, connection_file):
    enter = h.startsw()
    dc.mk_mitrals(model)
    read_mconnection_info(model, connection_file)
    dc.mk_gconnection_info(model)
    model.gids = model.mitral_gids.copy()
    model.gids.update(model.granule_gids)
    register_mitrals(model)
    build_granules(model)
    register_granules(model)
    build_synapses(model)
    elapsed('build_net_round_robin')
    if rank == 0: print "round robin setuptime ", h.startsw() - t_begin
Developer: JustasB, Project: Mig3DTest, Lines: 13, Source: net_mitral_centric.py
Example 9: mk_gconnection_info
def mk_gconnection_info(model):
    mk_gconnection_info_part1(model)
    mk_gconnection_info_part2(model)

    # # Save full network Mitral-Granule connections
    # mitral2granule = {}
    # for mgid in model.mitral_gids:
    #     mitral2granule.update({mgid: [gc[3] for gc in model.mconnections[mgid]]})
    #
    # import cPickle as pickle
    # with open('mitral2granule.p', 'wb') as fp:
    #     pickle.dump(mitral2granule, fp)

    util.elapsed('mk_gconnection_info (#granules = %d)'%int(pc.allreduce(len(model.granule_gids),1)))
Developer: JustasB, Project: Mig3DTest, Lines: 14, Source: determine_connections.py
Example 10: scroll_through_all_dois
def scroll_through_all_dois(query_doi=None, first=None, last=None, today=False, week=False, chunk_size=1000):
    # needs a mailto, see https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service
    headers = {"Accept": "application/json", "User-Agent": "mailto:[email protected]"}

    if first:
        base_url = "https://api.crossref.org/works?filter=from-created-date:{first},until-created-date:{last}&rows={rows}&select=DOI&cursor={next_cursor}"
    else:
        base_url = "https://api.crossref.org/works?filter=until-created-date:{last}&rows={rows}&select=DOI&cursor={next_cursor}"

    next_cursor = "*"
    has_more_responses = True
    number_added = 0

    while has_more_responses:
        has_more_responses = False
        start_time = time()
        url = base_url.format(
            first=first,
            last=last,
            rows=chunk_size,
            next_cursor=next_cursor)
        logger.info(u"calling url: {}".format(url))
        resp = requests.get(url, headers=headers)
        logger.info(u"getting crossref response took {} seconds. url: {}".format(elapsed(start_time, 2), url))
        if resp.status_code != 200:
            logger.info(u"error in crossref call, status_code = {}".format(resp.status_code))
            return number_added

        resp_data = resp.json()["message"]
        next_cursor = resp_data.get("next-cursor", None)
        if next_cursor:
            next_cursor = quote(next_cursor)
            if resp_data["items"] and len(resp_data["items"]) == chunk_size:
                has_more_responses = True

        dois_from_api = [clean_doi(api_raw["DOI"]) for api_raw in resp_data["items"]]
        added_pubs = add_new_pubs_from_dois(dois_from_api)
        if dois_from_api:
            logger.info(u"got {} dois from api".format(len(dois_from_api)))
        if added_pubs:
            logger.info(u"{}: saved {} new pubs, including {}".format(
                first, len(added_pubs), added_pubs[-2:]))
            number_added += len(added_pubs)

        logger.info(u"loop done in {} seconds".format(elapsed(start_time, 2)))

    return number_added
Developer: Impactstory, Project: sherlockoa, Lines: 50, Source: put_crossref_in_db.py
Example 11: update_fn
def update_fn(cls, method_name, obj_id_list, shortcut_data=None, index=1):

    # we are in a fork! dispose of our engine.
    # will get a new one automatically
    db.engine.dispose()

    start = time()
    q = db.session.query(cls).options(orm.undefer('*')).filter(cls.id.in_(obj_id_list))
    obj_rows = q.all()
    num_obj_rows = len(obj_rows)
    print "{repr}.{method_name}() got {num_obj_rows} objects in {elapsed}sec".format(
        repr=cls.__name__,
        method_name=method_name,
        num_obj_rows=num_obj_rows,
        elapsed=elapsed(start)
    )

    for count, obj in enumerate(obj_rows):
        start_time = time()

        if obj is None:
            return None

        method_to_run = getattr(obj, method_name)

        print u"\n***\n{count}: starting {repr}.{method_name}() method".format(
            count=count + (num_obj_rows*index),
            repr=obj,
            method_name=method_name
        )

        if shortcut_data:
            method_to_run(shortcut_data)
        else:
            method_to_run()

        print u"finished {repr}.{method_name}(). took {elapsed}sec".format(
            repr=obj,
            method_name=method_name,
            elapsed=elapsed(start_time, 4)
        )

        commit_success = safe_commit(db)
        if not commit_success:
            print u"COMMIT fail"

    db.session.remove()  # close connection nicely
    return None  # important for if we use this on RQ
Developer: Impactstory, Project: impactstory-tng, Lines: 49, Source: jobs.py
Example 12: update_fn
def update_fn(cls, method_name, obj_id_list, shortcut_data=None):

    # we are in a fork! dispose of our engine.
    # will get a new one automatically
    db.engine.dispose()

    start = time()
    q = db.session.query(cls).filter(cls.id.in_(obj_id_list))
    if cls.__name__ == "Person":
        q = q.options(person_load_options())
    obj_rows = q.all()
    num_obj_rows = len(obj_rows)
    print "{repr}.{method_name}() got {num_obj_rows} objects in {elapsed}sec".format(
        repr=cls.__name__,
        method_name=method_name,
        num_obj_rows=num_obj_rows,
        elapsed=elapsed(start)
    )

    for obj in obj_rows:
        start_time = time()

        if obj is None:
            return None

        method_to_run = getattr(obj, method_name)

        print u"\nstarting {repr}.{method_name}() method".format(
            repr=obj,
            method_name=method_name
        )

        if shortcut_data:
            method_to_run(shortcut_data)
        else:
            method_to_run()

        print u"finished {repr}.{method_name}(). took {elapsed}sec".format(
            repr=obj,
            method_name=method_name,
            elapsed=elapsed(start_time, 4)
        )

        db.session.commit()

    db.session.remove()  # close connection nicely
    return None  # important for if we use this on RQ
Developer: cfirmo33, Project: depsy, Lines: 49, Source: jobs.py
Example 13: set_data_for_all_products
def set_data_for_all_products(self, method_name, high_priority=False, include_products=None):
    start_time = time()
    threads = []

    # use all products unless passed a specific set
    if not include_products:
        include_products = self.all_products

    # start a thread for each product
    for work in include_products:
        method = getattr(work, method_name)
        process = threading.Thread(target=method, args=[high_priority])
        process.start()
        threads.append(process)

    # wait till all work is done
    for process in threads:
        process.join()

    # now go see if any of them had errors
    # need to do it this way because can't catch thread failures; have to check
    # object afterwards instead to see if they logged failures
    for work in include_products:
        if work.error:
            # don't print out doi here because that could cause another bug
            # print u"setting person error; {} for product {}".format(work.error, work.id)
            self.error = work.error

    print u"finished {method_name} on {num} products in {sec}s".format(
        method_name=method_name.upper(),
        num=len(include_products),
        sec=elapsed(start_time, 2)
    )
Developer: ethanwhite, Project: impactstory-tng, Lines: 33, Source: person.py
Example 14: harvest
def harvest(self, **kwargs):  # pragma: no cover
    """Make HTTP requests to the OAI server.

    :param kwargs: OAI HTTP parameters.
    :rtype: :class:`sickle.OAIResponse`
    """
    start_time = time()
    for _ in range(self.max_retries):
        if self.http_method == 'GET':
            payload_str = "&".join("%s=%s" % (k,v) for k,v in kwargs.items())
            url_without_encoding = u"{}?{}".format(self.endpoint, payload_str)
            http_response = requests.get(url_without_encoding,
                                         **self.request_args)
            self.http_response_url = http_response.url
        else:
            http_response = requests.post(self.endpoint, data=kwargs,
                                          **self.request_args)
            self.http_response_url = http_response.url

        if http_response.status_code == 503:
            retry_after = self.RETRY_SECONDS
            logger.info("HTTP 503! Retrying after %d seconds..." % retry_after)
            sleep(retry_after)
        else:
            logger.info("took {} seconds to call pmh url: {}".format(elapsed(start_time), http_response.url))

            http_response.raise_for_status()
            if self.encoding:
                http_response.encoding = self.encoding
            return OAIResponse(http_response, params=kwargs)
Developer: Impactstory, Project: sherlockoa, Lines: 28, Source: endpoint.py
Example 15: _grep_for_dep_lines
def _grep_for_dep_lines(self, query_str, include_globs, exclude_globs):
    arg_list = ['zipgrep', query_str, self.temp_file_name]
    arg_list += include_globs
    arg_list.append("-x")
    arg_list += exclude_globs

    start = time()
    try:
        print "Running zipgrep: '{}'".format(" ".join(arg_list))
        self.dep_lines = subprocess32.check_output(
            arg_list,
            timeout=90
        )
    except subprocess32.CalledProcessError:
        # heroku throws an error here when there are no dep lines to find.
        # but it's fine. there just aren't no lines.
        pass
    except subprocess32.TimeoutExpired:
        # too many files, we'll skip it and move on.
        self.error = "grep_timeout"
        pass
    finally:
        self.grep_elapsed = elapsed(start, 4)
        #print "found these dep lines: {}".format(self.dep_lines)
        print "finished dep lines search in {} sec".format(self.grep_elapsed)
Developer: cfirmo33, Project: depsy, Lines: 28, Source: zip_getter.py
Example 16: add_repos_from_remote_csv
def add_repos_from_remote_csv(csv_url, language):
    start = time()

    print "going to go get file"
    response = requests.get(csv_url, stream=True)

    index = 0
    for github_url in response.iter_lines(chunk_size=1000):
        login, repo_name = login_and_repo_name_from_url(github_url)
        if login and repo_name:
            repo = GithubRepo(
                login=login,
                repo_name=repo_name,
                language=language
            )
            print repo
            db.session.merge(repo)

        index += 1
        if index % 1000 == 0:
            db.session.commit()
            print "flushing on index {index}, elapsed: {elapsed}".format(
                index=index,
                elapsed=elapsed(start))

    db.session.commit()
Developer: otrarto, Project: depsy, Lines: 25, Source: github_repo.py
Example 17: leaderboard
def leaderboard():
    filters_dict = make_filters_dict(request.args)
    page_size = request.args.get("page_size", "25")

    start = time()
    num_total, leaders = get_leaders(
        filters=filters_dict,
        page_size=int(page_size)
    )
    leaders_list = [leader.as_snippet for leader in leaders]

    ret_dict = {
        "num_returned": len(leaders_list),
        "num_total": num_total,
        "list": leaders_list,
        "type": filters_dict["type"],
        "filters": filters_dict
    }

    if "tag" in filters_dict:
        tag_obj = Tags.query.filter(Tags.unique_tag==filters_dict["tag"]).first()
        ret_dict["related_tags"] = tag_obj.related_tags

    ret = json_resp_from_thing(ret_dict)
    elapsed_time = elapsed(start)
    ret.headers["x-elapsed"] = elapsed_time
    return ret
Developer: otrarto, Project: depsy, Lines: 27, Source: views.py
Example 18: check_pdf_urls
def check_pdf_urls(pdf_urls):
    for url in pdf_urls:
        make_transient(url)

    # free up the connection while doing net IO
    safe_commit(db)
    db.engine.dispose()

    req_pool = get_request_pool()
    checked_pdf_urls = req_pool.map(get_pdf_url_status, pdf_urls, chunksize=1)
    req_pool.close()
    req_pool.join()

    row_dicts = [x.__dict__ for x in checked_pdf_urls]
    for row_dict in row_dicts:
        row_dict.pop('_sa_instance_state')

    db.session.bulk_update_mappings(PdfUrl, row_dicts)

    start_time = time()
    commit_success = safe_commit(db)
    if not commit_success:
        logger.info(u"COMMIT fail")
    logger.info(u"commit took {} seconds".format(elapsed(start_time, 2)))
Developer: Impactstory, Project: sherlockoa, Lines: 25, Source: queue_pdf_url_check.py
Example 19: get_search_query
def get_search_query(query):
    start_time = time()
    my_pubs = fulltext_search_title(query)
    response = [my_pub.to_dict_search() for my_pub in my_pubs]
    sorted_response = sorted(response, key=lambda k: k['score'], reverse=True)
    elapsed_time = elapsed(start_time, 3)
    return jsonify({"results": sorted_response, "elapsed_seconds": elapsed_time})
Developer: Impactstory, Project: sherlockoa, Lines: 7, Source: views.py
Example 20: refresh
def refresh(self, high_priority=False):
    print u"* refreshing {} ({})".format(self.orcid_id, self.full_name)
    self.error = None
    start_time = time()
    try:
        print u"** calling call_apis"
        self.call_apis(high_priority=high_priority)

        print u"** calling calculate"
        self.calculate()

        print u"** finished refreshing all {num} products for {orcid_id} ({name}) in {sec}s".format(
            orcid_id=self.orcid_id,
            name=self.full_name,
            num=len(self.all_products),
            sec=elapsed(start_time)
        )
    except (KeyboardInterrupt, SystemExit):
        # let these ones through, don't save anything to db
        raise
    except requests.Timeout:
        self.error = "requests timeout"
    except OrcidDoesNotExist:
        self.invalid_orcid = True
        self.error = "invalid orcid"
    except Exception:
        logging.exception("refresh error")
        self.error = "refresh error"
        print u"in generic exception handler, so rolling back in case it is needed"
        db.session.rollback()
    finally:
        self.updated = datetime.datetime.utcnow().isoformat()
        if self.error:
            print u"ERROR refreshing person {}: {}".format(self.id, self.error)
Developer: ethanwhite, Project: impactstory-tng, Lines: 35, Source: person.py
Note: the util.elapsed examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's License before redistributing or reusing the code; do not republish without permission.