From b1e406d73844d5a30344ca8ac855fe850c52bc2f Mon Sep 17 00:00:00 2001
From: Dan McGee
Date: Wed, 16 Nov 2011 12:49:17 -0600
Subject: reporead_inotify: spin up read_repo() in separate process

This prevents memory usage from ballooning to absolutely huge values,
such as when multiple threads kick off at the same time. The bulk of
our memory allocation obviously comes in these threads and not the
main thread, so being able to isolate them in processes helps a lot.

Signed-off-by: Dan McGee
---
 devel/management/commands/reporead_inotify.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/devel/management/commands/reporead_inotify.py b/devel/management/commands/reporead_inotify.py
index 4c865ce1..ffd49b8f 100755
--- a/devel/management/commands/reporead_inotify.py
+++ b/devel/management/commands/reporead_inotify.py
@@ -15,6 +15,7 @@
 """

 import logging
+import multiprocessing
 import os.path
 import pyinotify
 import sys
@@ -133,6 +134,7 @@ def queue_for_update(self, mtime):
                 return
             if self.update_thread:
                 self.update_thread.cancel()
+                self.update_thread = None
         self._start_update_countdown()

     def update(self):
@@ -142,8 +144,13 @@ def update(self):
             self.updating = True

         try:
-            # invoke reporead's primary method
-            read_repo(self.arch, self.path, {})
+            # invoke reporead's primary method. we do this in a separate
+            # process for memory conservation purposes; these processes grow
+            # rather large so it is best to free up the memory ASAP.
+            process = multiprocessing.Process(target=read_repo,
+                    args=[self.arch, self.path, {}])
+            process.start()
+            process.join()
         finally:
             logger.debug('Done updating database %s.', self.path)
             with self.lock:
--
cgit v1.2.3-54-g00ecf
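
A minimal, self-contained sketch of the pattern the last hunk introduces: run a
memory-hungry job in a short-lived child process so its allocations are
returned to the operating system as soon as the child exits, instead of staying
resident in the long-running inotify daemon. Only the Process/start/join shape
mirrors the patch; heavy_job, run_isolated, and the example arguments are
hypothetical stand-ins, not archweb code, and read_repo() appears only in a
comment.

    # Sketch only: heavy_job() stands in for reporead's read_repo(), which
    # allocates a lot of memory while parsing a repo database.
    import multiprocessing

    def heavy_job(arch, path, options):
        # placeholder for memory-heavy work; these allocations live entirely
        # in the child process and vanish when it terminates
        data = [(arch, path)] * 1000000
        return len(data)

    def run_isolated(arch, path):
        # mirror the patch: start the child, then block until it finishes;
        # the parent never holds the large allocations itself
        process = multiprocessing.Process(target=heavy_job,
                args=(arch, path, {}))
        process.start()
        process.join()

    if __name__ == '__main__':
        run_isolated('x86_64', 'core.db.tar.gz')

The child's return value is discarded, which matches the patch: read_repo()
writes its results to the database, so the only purpose of the subprocess is
that the kernel reclaims all of its memory when the child terminates.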