本文整理汇总了Python中multiprocessing.pool.close方法的典型用法代码示例。如果您正苦于以下问题:Python pool.close方法的具体用法?Python pool.close怎么用?Python pool.close使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块multiprocessing.pool的用法示例。

在下文中一共展示了pool.close方法的24个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: test_no_import_lock_contention

​点赞 6

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_no_import_lock_contention(self):
    # Regression test for CPython issue #22853: importing a module that
    # itself uses multiprocessing at import time must not deadlock on
    # the import lock.
    with test.support.temp_cwd():
        module_name = 'imported_by_an_imported_module'
        # Write a throwaway module whose import-time code round-trips a
        # value through a Queue; the "if 1:" wrapper lets the embedded
        # source stay indented inside the triple-quoted string.
        with open(module_name + '.py', 'w') as f:
            f.write("""if 1:
                import multiprocessing

                q = multiprocessing.Queue()
                q.put('knock knock')
                q.get(timeout=3)
                q.close()
                del q
            """)

        with test.support.DirsOnSysPath(os.getcwd()):
            try:
                # Before the fix, the feeder thread could not import
                # while the import lock was held, so q.get() timed out.
                __import__(module_name)
            except pyqueue.Empty:
                self.fail("Probable regression on import lock contention;"
                          " see Issue #22853")

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:22,

示例2: test_unpickleable_result

​点赞 6

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_unpickleable_result(self):
    # A result that cannot be pickled must surface as MaybeEncodingError
    # in the parent instead of killing the worker process.
    from multiprocessing.pool import MaybeEncodingError
    p = multiprocessing.Pool(2)

    # Make sure we don't lose pool processes because of encoding errors.
    for iteration in range(20):

        # Capture the error delivered to the error callback.
        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc

        res = p.apply_async(unpickleable_result, error_callback=errback)
        self.assertRaises(MaybeEncodingError, res.get)
        wrapped = scratchpad[0]
        self.assertTrue(wrapped)
        self.assertIsInstance(scratchpad[0], MaybeEncodingError)
        # The wrapper must preserve both the original exception and the
        # offending value.
        self.assertIsNotNone(wrapped.exc)
        self.assertIsNotNone(wrapped.value)

    p.close()
    p.join()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:23,

示例3: test_pool_worker_lifetime_early_close

​点赞 6

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_pool_worker_lifetime_early_close(self):
    # Issue #10332: closing a pool whose workers have limited lifetimes
    # before all the tasks completed would make join() hang.
    p = multiprocessing.Pool(3, maxtasksperchild=1)
    results = []
    # Queue more tasks than workers so replacement workers are needed
    # after close() has been called.
    for i in range(6):
        results.append(p.apply_async(sqr, (i, 0.3)))
    p.close()
    p.join()
    # check the results
    for (j, res) in enumerate(results):
        self.assertEqual(res.get(), sqr(j))

#

# Test of creating a customized manager class

#

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:18,

示例4: test_large_fd_transfer

​点赞 6

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_large_fd_transfer(self):
    # With fd > 256 (issue #11657): sending a large-numbered file
    # descriptor over a connection must work.
    if self.TYPE != 'processes':
        self.skipTest("only makes sense with processes")
    conn, child_conn = self.Pipe(duplex=True)

    # The child exhausts low fds (create_dummy_fds=True) and then
    # receives our fd and writes b"bar" to it.
    p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
    p.daemon = True
    p.start()
    self.addCleanup(test.support.unlink, test.support.TESTFN)
    with open(test.support.TESTFN, "wb") as f:
        fd = f.fileno()
        # Find an unassigned descriptor number above 256 to dup onto.
        for newfd in range(256, MAXFD):
            if not self._is_fd_assigned(newfd):
                break
        else:
            self.fail("could not find an unassigned large file descriptor")
        os.dup2(fd, newfd)
        try:
            reduction.send_handle(conn, newfd, p.pid)
        finally:
            os.close(newfd)
    p.join()
    # The child must have written through the transferred descriptor.
    with open(test.support.TESTFN, "rb") as f:
        self.assertEqual(f.read(), b"bar")

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:27,

示例5: _listener

​点赞 6

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def _listener(cls, conn, families):
    # Child-process helper: for each connection family, open a Listener,
    # report its address to the parent over *conn*, accept one
    # connection, and send the accepted Connection object back.
    for fam in families:
        l = cls.connection.Listener(family=fam)
        conn.send(l.address)
        new_conn = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

    # Repeat the handshake with a plain TCP socket listener.
    l = socket.socket()
    l.bind((test.support.HOST, 0))
    l.listen()
    conn.send(l.getsockname())
    new_conn, addr = l.accept()
    conn.send(new_conn)
    new_conn.close()
    l.close()

    # Block until the parent signals completion.
    conn.recv()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:21,

示例6: test_timeout

​点赞 6

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_timeout(self):
    # Listener/Client must work even when a small global socket default
    # timeout is in effect.
    old_timeout = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(0.1)
        parent, child = multiprocessing.Pipe(duplex=True)
        l = multiprocessing.connection.Listener(family='AF_INET')
        p = multiprocessing.Process(target=self._test_timeout,
                                    args=(child, l.address))
        p.start()
        child.close()
        # Handshake over the pipe, then over the accepted connection.
        self.assertEqual(parent.recv(), 123)
        parent.close()
        conn = l.accept()
        self.assertEqual(conn.recv(), 456)
        conn.close()
        l.close()
        p.join(10)
    finally:
        # Always restore the process-wide default timeout.
        socket.setdefaulttimeout(old_timeout)

#

# Test what happens with no "if __name__ == '__main__'"

#

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:25,

示例7: get_high_socket_fd

​点赞 6

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def get_high_socket_fd(self):
    # Return a detached socket file descriptor that a freshly spawned
    # child process is guaranteed not to have inherited/assigned.
    if WIN32:
        # The child process will not have any socket handles, so
        # calling socket.fromfd() should produce WSAENOTSOCK even
        # if there is a handle of the same number.
        return socket.socket().detach()
    else:
        # We want to produce a socket with an fd high enough that a
        # freshly created child process will not have any fds as high.
        fd = socket.socket().detach()
        to_close = []
        # Keep duplicating until the fd number reaches at least 50,
        # then release the intermediate descriptors.
        while fd < 50:
            to_close.append(fd)
            fd = os.dup(fd)
        for x in to_close:
            os.close(x)
        return fd

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:19,

示例8: test_ignore

​点赞 6

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_ignore(self):
    # Connection send/recv must survive being interrupted by a signal
    # (SIGUSR1) instead of failing with EINTR.
    conn, child_conn = multiprocessing.Pipe()
    try:
        p = multiprocessing.Process(target=self._test_ignore,
                                    args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()
        self.assertEqual(conn.recv(), 'ready')
        # The sleeps give the child time to block inside recv before we
        # interrupt it with a signal.
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGUSR1)
        time.sleep(0.1)
        conn.send(1234)
        self.assertEqual(conn.recv(), 1234)
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGUSR1)
        # Large payload exercises the partial-write retry path too.
        self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
        time.sleep(0.1)
        p.join()
    finally:
        conn.close()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:23,

示例9: test_ignore_listener

​点赞 6

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_ignore_listener(self):
    # Listener.accept() must survive being interrupted by a signal
    # (SIGUSR1) instead of failing with EINTR.
    conn, child_conn = multiprocessing.Pipe()
    try:
        p = multiprocessing.Process(target=self._test_ignore_listener,
                                    args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()
        address = conn.recv()
        # Give the child time to block in accept(), then interrupt it.
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGUSR1)
        time.sleep(0.1)
        client = multiprocessing.connection.Client(address)
        self.assertEqual(client.recv(), 'welcome')
        p.join()
    finally:
        conn.close()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:19,

示例10: _listener

​点赞 6

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def _listener(cls, conn, families):
    # Child-process helper (older variant using listen(1)): for each
    # connection family, open a Listener, report its address to the
    # parent over *conn*, accept one connection, and send the accepted
    # Connection object back.
    for fam in families:
        l = cls.connection.Listener(family=fam)
        conn.send(l.address)
        new_conn = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

    # Repeat the handshake with a plain TCP socket listener.
    l = socket.socket()
    l.bind((test.support.HOST, 0))
    l.listen(1)
    conn.send(l.getsockname())
    new_conn, addr = l.accept()
    conn.send(new_conn)
    new_conn.close()
    l.close()

    # Block until the parent signals completion.
    conn.recv()

开发者ID:IronLanguages,项目名称:ironpython3,代码行数:21,

示例11: scrape_pages

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def scrape_pages(pages):
    # Scrape a batch of page records in a worker pool and bulk-save the
    # updated rows.  Returns the ids of the successfully scraped pages.
    # NOTE(review): assumes *pages* are SQLAlchemy-mapped PageNew
    # instances — verify against callers.
    for page in pages:
        # Detach the objects from the session so they can be shipped to
        # worker processes.
        make_transient(page)

    # free up the connection while doing net IO
    db.session.close()
    db.engine.dispose()

    pool = get_worker_pool()
    map_results = pool.map(scrape_with_timeout, pages, chunksize=1)
    # scrape_with_timeout returns None on timeout; drop those.
    scraped_pages = [p for p in map_results if p]
    logger.info(u'finished scraping all pages')
    pool.close()
    pool.join()

    logger.info(u'preparing update records')
    row_dicts = [x.__dict__ for x in scraped_pages]
    for row_dict in row_dicts:
        # __dict__ contains SQLAlchemy bookkeeping that
        # bulk_update_mappings would reject.
        row_dict.pop('_sa_instance_state')

    logger.info(u'saving update records')
    db.session.bulk_update_mappings(PageNew, row_dicts)

    scraped_page_ids = [p.id for p in scraped_pages]
    return scraped_page_ids

# need to spawn processes from workers but can't do that if worker is daemonized

开发者ID:ourresearch,项目名称:oadoi,代码行数:30,

示例12: scrape_with_timeout

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def scrape_with_timeout(page):
    # Run scrape_page(page) in a single-worker (non-daemonic) pool so the
    # work can be abandoned if it exceeds the timeout.  Returns the
    # scraped page, or None if the scrape timed out.
    pool = NDPool(processes=1)
    async_result = pool.apply_async(scrape_page, (page,))
    result = None
    try:
        result = async_result.get(timeout=600)
        pool.close()
    except TimeoutError:
        logger.info(u'page scrape timed out: {}'.format(page))
        # terminate() kills the stuck worker so join() below won't hang.
        pool.terminate()
    pool.join()
    return result

开发者ID:ourresearch,项目名称:oadoi,代码行数:15,

示例13: _fetch_stock_data

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def _fetch_stock_data(self, stock_list):
    """Fetch quote data for every code batch in *stock_list*.

    Each entry is handed to ``self.get_stocks_by_range`` on its own
    worker thread; ``None`` results (failed fetches) are filtered out.
    Returns a list of the non-``None`` results, in input order.
    """
    # Fix: ThreadPool(0) raises ValueError, so short-circuit empty input.
    if not stock_list:
        return []
    pool = multiprocessing.pool.ThreadPool(len(stock_list))
    try:
        res = pool.map(self.get_stocks_by_range, stock_list)
    finally:
        pool.close()
        # Fix: join the pool so worker threads are reaped instead of
        # being leaked on every call.
        pool.join()
    return [d for d in res if d is not None]

开发者ID:shidenggui,项目名称:easyquotation,代码行数:10,

示例14: as_bulk_resolve

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def as_bulk_resolve(candidates, threads=50):
    """
    Resolve a list of IPs to AS information.

    Returns a map of each result as a tuple of (ASN, owner) keyed to
    its candidate.  Returns None if no ASN could be found or (ASN,
    None) if an ASN was found but no owner is available.

    WARNING: This function will create a pool of up to 'threads'
    threads.
    """
    result = {}

    if not candidates:
        return result

    pool = multiprocessing.pool.ThreadPool(
        processes=min(len(candidates), threads))
    try:
        for ip, as_ in pool.imap(
                __asresolve__,
                candidates,
                chunksize=1):
            result[ip] = as_
        # Fix: the original never joined the pool, leaking threads.
        pool.close()
        pool.join()
    except Exception:
        # Fix: on error the original leaked the whole pool; kill the
        # workers and reap them before propagating.
        pool.terminate()
        pool.join()
        raise
    return result

开发者ID:perfsonar,项目名称:pscheduler,代码行数:29,

示例15: run

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def run(self):
    # Worker loop: this side never uses the parent's end, so close it,
    # then upper-case every string received on the child end until the
    # None sentinel arrives.
    self.parent_conn.close()
    while True:
        item = self.child_conn.recv()
        if item is None:
            break
        self.child_conn.send(item.upper())
    self.child_conn.close()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:7,

示例16: stop

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def stop(self):
    # Send the sentinel that terminates run()'s receive loop, then close
    # both pipe ends held on this side.
    self.parent_conn.send(None)
    self.parent_conn.close()
    self.child_conn.close()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:6,

示例17: test_make_pool

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_make_pool(self):

self.assertRaises(ValueError, multiprocessing.Pool, -1)

self.assertRaises(ValueError, multiprocessing.Pool, 0)

p = multiprocessing.Pool(3)

self.assertEqual(3, len(p._pool))

p.close()

p.join()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:10,

示例18: test_empty_iterable

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_empty_iterable(self):
    # See Issue 12157: every mapping flavour must return an empty result
    # for an empty iterable instead of hanging.
    pool = self.Pool(1)

    self.assertEqual(pool.map(sqr, []), [])
    self.assertEqual(list(pool.imap(sqr, [])), [])
    self.assertEqual(list(pool.imap_unordered(sqr, [])), [])
    self.assertEqual(pool.map_async(sqr, []).get(), [])

    pool.close()
    pool.join()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:13,

示例19: test_pool_worker_lifetime

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_pool_worker_lifetime(self):
    # With maxtasksperchild set, workers must be replaced after serving
    # their quota of tasks.
    p = multiprocessing.Pool(3, maxtasksperchild=10)
    self.assertEqual(3, len(p._pool))
    origworkerpids = [w.pid for w in p._pool]
    # Run many tasks so each worker gets replaced (hopefully)
    results = []
    for i in range(100):
        results.append(p.apply_async(sqr, (i, )))
    # Fetch the results and verify we got the right answers,
    # also ensuring all the tasks have completed.
    for (j, res) in enumerate(results):
        self.assertEqual(res.get(), sqr(j))
    # Refill the pool
    p._repopulate_pool()
    # Wait until all workers are alive
    # (countdown * DELTA = 5 seconds max startup process time)
    countdown = 50
    while countdown and not all(w.is_alive() for w in p._pool):
        countdown -= 1
        time.sleep(DELTA)
    finalworkerpids = [w.pid for w in p._pool]
    # All pids should be assigned.  See issue #7805.
    self.assertNotIn(None, origworkerpids)
    self.assertNotIn(None, finalworkerpids)
    # Finally, check that the worker pids have changed
    self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
    p.close()
    p.join()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:30,

示例20: test_rapid_restart

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_rapid_restart(self):
    # A manager must be restartable on the same address shortly after
    # shutdown, tolerating a lingering socket from the previous run.
    authkey = os.urandom(32)
    manager = QueueManager(
        address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
    srvr = manager.get_server()
    addr = srvr.address
    # Close the connection.Listener socket which gets opened as a part
    # of manager.get_server(). It's not needed for the test.
    srvr.listener.close()
    manager.start()

    p = self.Process(target=self._putter, args=(manager.address, authkey))
    p.daemon = True
    p.start()
    queue = manager.get_queue()
    self.assertEqual(queue.get(), 'hello world')
    del queue
    manager.shutdown()

    # Restart a new manager on the exact same address.
    manager = QueueManager(
        address=addr, authkey=authkey, serializer=SERIALIZER)
    try:
        manager.start()
    except OSError as e:
        if e.errno != errno.EADDRINUSE:
            raise
        # Retry after some time, in case the old socket was lingering
        # (sporadic failure on buildbots)
        time.sleep(1.0)
        manager = QueueManager(
            address=addr, authkey=authkey, serializer=SERIALIZER)
    manager.shutdown()

#

#

#

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:37,

示例21: _echo

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def _echo(cls, conn):
    # Child-process helper: echo each byte message back to the sender
    # until the sentinel value arrives, then close the connection.
    while True:
        msg = conn.recv_bytes()
        if msg == SENTINEL:
            break
        conn.send_bytes(msg)
    conn.close()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:6,

示例22: _writefd

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def _writefd(cls, conn, data, create_dummy_fds=False):
    # Child-process helper: receive a file descriptor over *conn* and
    # write *data* to it.
    if create_dummy_fds:
        # Fill every unassigned fd below 256 so the received descriptor
        # is forced to a high number (used by the large-fd test).
        for i in range(0, 256):
            if not cls._is_fd_assigned(i):
                os.dup2(conn.fileno(), i)
    fd = reduction.recv_handle(conn)
    if msvcrt:
        # On Windows a handle is received; convert it to a CRT fd.
        fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
    os.write(fd, data)
    os.close(fd)

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:12,

示例23: test_multiple_bind

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def test_multiple_bind(self):
    # Binding a second Listener to an address already in use must raise
    # OSError, for every supported connection family.
    for family in self.connection.families:
        l = self.connection.Listener(family=family)
        self.addCleanup(l.close)
        self.assertRaises(OSError, self.connection.Listener,
                          l.address, family)

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:8,

示例24: _test

​点赞 5

# 需要导入模块: from multiprocessing import pool [as 别名]

# 或者: from multiprocessing.pool import close [as 别名]

def _test(cls, address):
    # Child-process helper: connect to *address* and send one greeting.
    conn = cls.connection.Client(address)
    conn.send('hello')
    conn.close()

开发者ID:Microvellum,项目名称:Fluid-Designer,代码行数:6,

注:本文中的multiprocessing.pool.close方法示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。

Python pool.close 方法代码示例相关推荐

  1. python中shelf_Python cmds.shelfLayout方法代码示例

    本文整理汇总了Python中maya.cmds.shelfLayout方法的典型用法代码示例.如果您正苦于以下问题:Python cmds.shelfLayout方法的具体用法?Python cmds ...

  2. python中summary_Python summary_pb2.Summary方法代码示例

    本文整理汇总了Python中tensorflow.core.framework.summary_pb2.Summary方法的典型用法代码示例.如果您正苦于以下问题:Python summary_pb2 ...

  3. python中weekday_Python calendar.weekday方法代码示例

    本文整理汇总了Python中calendar.weekday方法的典型用法代码示例.如果您正苦于以下问题:Python calendar.weekday方法的具体用法?Python calendar. ...

  4. python中callable_Python abc.Callable方法代码示例

    本文整理汇总了Python中collections.abc.Callable方法的典型用法代码示例.如果您正苦于以下问题:Python abc.Callable方法的具体用法?Python abc.C ...

  5. python中permute_Python layers.Permute方法代码示例

    本文整理汇总了Python中keras.layers.Permute方法的典型用法代码示例.如果您正苦于以下问题:Python layers.Permute方法的具体用法?Python layers. ...

  6. python中rcparams_Python pylab.rcParams方法代码示例

    # 需要导入模块: from matplotlib import pylab [as 别名] # 或者: from matplotlib.pylab import rcParams [as 别名] d ...

  7. python中opener_Python request.build_opener方法代码示例

    # 需要导入模块: from six.moves.urllib import request [as 别名] # 或者: from six.moves.urllib.request import bu ...

  8. python cpu count_Python multiprocessing.cpu_count方法代码示例

    本文整理汇总了Python中multiprocessing.cpu_count方法的典型用法代码示例.如果您正苦于以下问题:Python multiprocessing.cpu_count方法的具体用 ...

  9. python torch exp_Python torch.add方法代码示例

    本文整理汇总了Python中torch.add方法的典型用法代码示例.如果您正苦于以下问题:Python torch.add方法的具体用法?Python torch.add怎么用?Python tor ...

最新文章

  1. STL系列:map和unordered_map
  2. 五大微信小程序开发IDE深度评测
  3. python中classmethod的用法_Python中的@classmethod是如何使用的?
  4. bootstrap php zend,Zend Framework教程之Bootstrap类用法概述
  5. 牛客国庆集训派对Day6
  6. 「PKUWC2018」Slay the Spire
  7. pytorch中gather函数的理解
  8. 我的世界服务器按键显示mode,【服务器相关】【求助!】关于服务器中使用gamemode等命令错误。...
  9. 基于Tkinter和百度Aip的人体关键点检测
  10. java并发编程之thread.join()方法详解
  11. java集合性能测试,关于Map和List的性能测试
  12. 【前端JS】input textarea 默认文字,点击消失
  13. 关于VC句柄的一种可爱的解释
  14. 五线谱音名和组别对照表_五线谱最全知识及符号! 太实用了,100%收藏!!!...
  15. PLSQL的快捷键以及使用技巧
  16. 第 4 篇、Linux操作基础 | 计算机组成
  17. 基于OpenStack Ironic与DPU的网易数帆裸金属方案实践
  18. 计算机二级15年大纲,2015年下半年全国计算机二级考试MSoffice高级应用大纲
  19. 初学者入门网络安全学哪种编程语言好?
  20. KnockoutJS的使用及分析

热门文章

  1. 四:es聚和函数Aggregations
  2. jquery 视觉特效(新闻滚动浏览)
  3. 宝塔解压文件,通过SSH命令解压缩.tar.gz、.gz、.zip文件的方法
  4. 双色球预测的一次尝试——前期准备与初步实施
  5. [转载]python库收集贴
  6. vue技术分享ppt_胡中南:Web端GIS技术新进展 | GTC专题论坛报告(视频+PPT+速记)
  7. 关于翻译的两篇好文章
  8. java第四方聚合支付,2017年第四方和第三方聚合支付平台排名
  9. SQLite简介,C#调用SQLite
  10. sublime简要笔记