Diffstat (limited to 'devel')
-rw-r--r--  devel/management/commands/generate_keyring.py |  59
-rw-r--r--  devel/management/commands/rematch_packager.py |   2
-rw-r--r--  devel/management/commands/reporead.py         | 249
-rwxr-xr-x  devel/management/commands/reporead_inotify.py | 211
-rw-r--r--  devel/utils.py                                |  16
-rw-r--r--  devel/views.py                                |  20
6 files changed, 437 insertions(+), 120 deletions(-)
diff --git a/devel/management/commands/generate_keyring.py b/devel/management/commands/generate_keyring.py
new file mode 100644
index 00000000..35ab8874
--- /dev/null
+++ b/devel/management/commands/generate_keyring.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+"""
+generate_keyring command
+
+Assemble a GPG keyring with all known developer keys.
+
+Usage: ./manage.py generate_keyring <keyserver> <keyring_path>
+"""
+
+from django.core.management.base import BaseCommand, CommandError
+
+import logging
+import subprocess
+import sys
+
+from main.models import UserProfile
+
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s -> %(levelname)s: %(message)s',
+    datefmt='%Y-%m-%d %H:%M:%S',
+    stream=sys.stderr)
+logger = logging.getLogger()
+
+class Command(BaseCommand):
+    args = "<keyserver> <keyring_path>"
+    help = "Assemble a GPG keyring with all known developer keys."
+
+    def handle(self, *args, **options):
+        v = int(options.get('verbosity', None))
+        if v == 0:
+            logger.level = logging.ERROR
+        elif v == 1:
+            logger.level = logging.INFO
+        elif v == 2:
+            logger.level = logging.DEBUG
+
+        if len(args) != 2:
+            raise CommandError("keyserver and keyring_path must be provided")
+
+        return generate_keyring(args[0], args[1])
+
+def generate_keyring(keyserver, keyring):
+    logger.info("getting all known key IDs")
+
+    # Screw you Django, for not letting one natively do value != <empty string>
+    key_ids = UserProfile.objects.filter(user__is_active=True,
+            pgp_key__isnull=False).extra(where=["pgp_key != ''"]).values_list(
+            "pgp_key", flat=True)
+    logger.info("%d keys fetched from user profiles", len(key_ids))
+
+    gpg_cmd = ["gpg", "--no-default-keyring", "--keyring", keyring,
+            "--keyserver", keyserver, "--recv-keys"]
+    logger.info("running command: %r", gpg_cmd)
+    gpg_cmd.extend(key_ids)
+    subprocess.check_call(gpg_cmd)
+    logger.info("keyring at %s successfully updated", keyring)
+
+# vim: set ts=4 sw=4 et:
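
Note: besides the ./manage.py invocation shown in the docstring, the new command can be driven programmatically. A minimal sketch using Django's call_command; the keyserver URL and keyring path here are illustrative placeholders, not values from this commit:

    from django.core.management import call_command

    # Positional args flow through to Command.handle(*args, **options).
    call_command('generate_keyring', 'hkp://pool.sks-keyservers.net',
            '/srv/keyring/archweb.gpg')
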
diff --git a/devel/management/commands/rematch_packager.py b/devel/management/commands/rematch_packager.py
index ba6e6a54..461d83ab 100644
--- a/devel/management/commands/rematch_packager.py
+++ b/devel/management/commands/rematch_packager.py
@@ -24,7 +24,7 @@ logging.basicConfig(
 logger = logging.getLogger()
 
 class Command(NoArgsCommand):
-    help = "Runs a check on all active mirror URLs to determine if they are reachable via IPv4 and/or v6."
+    help = "Match all packages with a packager_str but NULL packager_id to a packager if we can find one."
 
     def handle_noargs(self, **options):
         v = int(options.get('verbosity', None))
diff --git a/devel/management/commands/reporead.py b/devel/management/commands/reporead.py
index 470b785d..cf101d97 100644
--- a/devel/management/commands/reporead.py
+++ b/devel/management/commands/reporead.py
@@ -13,10 +13,6 @@ Example:
         ./manage.py reporead i686 /tmp/core.db.tar.gz
 """
 
-from django.core.management.base import BaseCommand, CommandError
-from django.contrib.auth.models import User
-from django.db import transaction
-
 from collections import defaultdict
 import io
 import os
@@ -27,6 +23,11 @@ import logging
 from datetime import datetime
 from optparse import make_option
 
+from django.core.management.base import BaseCommand, CommandError
+from django.contrib.auth.models import User
+from django.db import connections, router, transaction
+from django.db.utils import IntegrityError
+
 from devel.utils import UserFinder
 from main.models import Arch, Package, PackageDepend, PackageFile, Repo
 from packages.models import Conflict, Provision, Replacement
@@ -36,6 +37,8 @@ logging.basicConfig(
     format='%(asctime)s -> %(levelname)s: %(message)s',
     datefmt='%Y-%m-%d %H:%M:%S',
     stream=sys.stderr)
+TRACE = 5
+logging.addLevelName(TRACE, 'TRACE')
 logger = logging.getLogger()
 
 class Command(BaseCommand):
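
Note: the TRACE constant registers a custom level below DEBUG (10) so the very chatty per-block parse messages can be filtered independently. A standalone sketch of how such a level behaves, independent of this codebase:

    import logging

    TRACE = 5
    logging.addLevelName(TRACE, 'TRACE')
    logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')

    logger = logging.getLogger('example')
    logger.log(TRACE, "suppressed: effective level is DEBUG (10), TRACE is 5")
    logger.setLevel(TRACE)
    logger.log(TRACE, "emitted once the logger level drops to TRACE")
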
@@ -51,8 +54,6 @@ class Command(BaseCommand):
     def handle(self, arch=None, filename=None, **options):
         if not arch:
             raise CommandError('Architecture is required.')
-        if not validate_arch(arch):
-            raise CommandError('Specified architecture %s is not currently known.' % arch)
         if not filename:
             raise CommandError('Package database file is required.')
         filename = os.path.normpath(filename)
@@ -72,8 +73,8 @@ class Command(BaseCommand):
 
 class Pkg(object):
     """An interim 'container' object for holding Arch package data."""
-    bare = ( 'name', 'base', 'arch', 'desc', 'filename',
-            'md5sum', 'url', 'packager' )
+    bare = ( 'name', 'base', 'arch', 'filename',
+            'md5sum', 'sha256sum', 'url', 'packager' )
     number = ( 'csize', 'isize' )
     collections = ( 'depends', 'optdepends', 'conflicts',
             'provides', 'replaces', 'groups', 'license', 'files' )
@@ -85,6 +86,7 @@ class Pkg(object):
         self.ver = None
         self.rel = None
         self.epoch = 0
+        self.pgpsig = None
         for k in self.bare + self.number:
             setattr(self, k, None)
         for k in self.collections:
@@ -99,6 +101,9 @@ class Pkg(object):
                 setattr(self, k, v[0][:254])
             elif k in self.number:
                 setattr(self, k, long(v[0]))
+            elif k in ('desc', 'pgpsig'):
+                # do NOT prune these values at all
+                setattr(self, k, v[0])
             elif k == 'version':
                 match = self.version_re.match(v[0])
                 self.ver = match.group(3)
@@ -185,8 +190,6 @@ def create_multivalued(dbpkg, repopkg, db_attr, repo_attr):
 finder = UserFinder()
 
 def populate_pkg(dbpkg, repopkg, force=False, timestamp=None):
-    db_score = 1
-
     if repopkg.base:
         dbpkg.pkgbase = repopkg.base
     else:
@@ -203,13 +206,14 @@ def populate_pkg(dbpkg, repopkg, force=False, timestamp=None):
     dbpkg.packager_str = repopkg.packager
     # attempt to find the corresponding django user for this string
     dbpkg.packager = finder.find(repopkg.packager)
+    dbpkg.pgp_signature = repopkg.pgpsig
 
     if timestamp:
         dbpkg.flag_date = None
         dbpkg.last_update = timestamp
     dbpkg.save()
 
-    db_score += populate_files(dbpkg, repopkg, force=force)
+    populate_files(dbpkg, repopkg, force=force)
 
     dbpkg.packagedepend_set.all().delete()
     for y in repopkg.depends:
@@ -230,28 +234,23 @@ def populate_pkg(dbpkg, repopkg, force=False, timestamp=None):
     create_multivalued(dbpkg, repopkg, 'groups', 'groups')
     create_multivalued(dbpkg, repopkg, 'licenses', 'license')
 
-    related_score = (len(repopkg.depends) + len(repopkg.optdepends)
-            + len(repopkg.conflicts) + len(repopkg.provides)
-            + len(repopkg.replaces) + len(repopkg.groups)
-            + len(repopkg.license))
-    if related_score:
-        db_score += (related_score / 20) + 1
-    return db_score
+
+pkg_same_version = lambda pkg, dbpkg: pkg.ver == dbpkg.pkgver \
+        and pkg.rel == dbpkg.pkgrel and pkg.epoch == dbpkg.epoch
 
 def populate_files(dbpkg, repopkg, force=False):
     if not force:
-        if dbpkg.pkgver != repopkg.ver or dbpkg.pkgrel != repopkg.rel \
-                or dbpkg.epoch != repopkg.epoch:
+        if not pkg_same_version(repopkg, dbpkg):
             logger.info("DB version (%s) didn't match repo version "
                     "(%s) for package %s, skipping file list addition",
                     dbpkg.full_version, repopkg.full_version,
                     dbpkg.pkgname)
-            return 0
+            return
         if not dbpkg.files_last_update or not dbpkg.last_update:
             pass
         elif dbpkg.files_last_update > dbpkg.last_update:
-            return 0
+            return
+
     # only delete files if we are reading a DB that contains them
     if repopkg.has_files:
         dbpkg.packagefile_set.all().delete()
@@ -270,30 +269,19 @@ def populate_files(dbpkg, repopkg, force=False):
             pkgfile.save(force_insert=True)
         dbpkg.files_last_update = datetime.utcnow()
         dbpkg.save()
-        return (len(repopkg.files) / 50) + 1
-    return 0
 
-class Batcher(object):
-    def __init__(self, threshold, start=0):
-        self.threshold = threshold
-        self.meter = start
+def select_pkg_for_update(dbpkg):
+    database = router.db_for_write(Package, instance=dbpkg)
+    connection = connections[database]
+    if 'sqlite' in connection.settings_dict['ENGINE'].lower():
+        return dbpkg
+    new_pkg = Package.objects.raw(
+            'SELECT * FROM packages WHERE id = %s FOR UPDATE',
+            [dbpkg.id])
+    return list(new_pkg)[0]
 
-    def batch_commit(self, score):
-        """
-        Track updates to the database and perform a commit if the batch
-        becomes sufficiently large. "Large" is defined by waiting for the
-        sum of scores to exceed the arbitrary threshold value; once it is
-        hit a commit is issued.
-        """
-        self.meter += score
-        if self.meter > self.threshold:
-            logger.debug("Committing transaction, batch threshold hit")
-            transaction.commit()
-            self.meter = 0
-
-@transaction.commit_on_success
 def db_update(archname, reponame, pkgs, options):
     """
     Parses a list and updates the Arch dev database accordingly.
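
Note: select_pkg_for_update() emulates row locking with a raw SELECT ... FOR UPDATE (skipped on SQLite, which has no row locks). A hedged sketch of the equivalent ORM call available from Django 1.4 onward, as the TODO comments in the update loop below anticipate:

    from main.models import Package

    def select_pkg_for_update(dbpkg):
        # Same row lock via the ORM (Django >= 1.4); must run inside a
        # transaction, and blocks concurrent FOR UPDATE holders until commit.
        return Package.objects.select_for_update().get(id=dbpkg.id)
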
@@ -305,88 +293,111 @@ def db_update(archname, reponame, pkgs, options):
     logger.info('Updating Arch: %s', archname)
     force = options.get('force', False)
     filesonly = options.get('filesonly', False)
-    repository = Repo.objects.get(name__iexact=reponame)
-    architecture = Arch.objects.get(name__iexact=archname)
-    # no-arg order_by() removes even the default ordering; we don't need it
-    dbpkgs = Package.objects.filter(
-            arch=architecture, repo=repository).order_by()
-    # This makes our inner loop where we find packages by name *way* more
-    # efficient by not having to go to the database for each package to
-    # SELECT them by name.
-    dbdict = dict([(pkg.pkgname, pkg) for pkg in dbpkgs])
-
-    logger.debug("Creating sets")
-    dbset = set(dbdict.keys())
-    syncset = set([pkg.name for pkg in pkgs])
-    logger.info("%d packages in current web DB", len(dbset))
-    logger.info("%d packages in new updating db", len(syncset))
-    in_sync_not_db = syncset - dbset
-    logger.info("%d packages in sync not db", len(in_sync_not_db))
-
-    # Try to catch those random package deletions that make Eric so unhappy.
-    if len(dbset):
-        dbpercent = 100.0 * len(syncset) / len(dbset)
-    else:
-        dbpercent = 0.0
-    logger.info("DB package ratio: %.1f%%", dbpercent)
-
-    # Fewer than 20 packages makes the percentage check unreliable, but it also
-    # means we expect the repo to fluctuate a lot.
-    msg = "Package database has %.1f%% the number of packages in the " \
-            "web database" % dbpercent
-    if len(dbset) == 0 and len(syncset) == 0:
-        pass
-    elif not filesonly and \
-            len(dbset) > 20 and dbpercent < 50.0 and \
-            not repository.testing and not repository.staging:
-        logger.error(msg)
-        raise Exception(msg)
-    elif dbpercent < 75.0:
-        logger.warning(msg)
-
-    batcher = Batcher(100)
+
+    with transaction.commit_manually():
+        repository = Repo.objects.get(name__iexact=reponame)
+        architecture = Arch.objects.get(name__iexact=archname)
+        # no-arg order_by() removes even the default ordering; we don't need it
+        dbpkgs = Package.objects.filter(
+                arch=architecture, repo=repository).order_by()
+        # This makes our inner loop where we find packages by name *way* more
+        # efficient by not having to go to the database for each package to
+        # SELECT them by name.
+        dbdict = dict((dbpkg.pkgname, dbpkg) for dbpkg in dbpkgs)
+
+        logger.debug("Creating sets")
+        dbset = set(dbdict.keys())
+        syncset = set([pkg.name for pkg in pkgs])
+        logger.info("%d packages in current web DB", len(dbset))
+        logger.info("%d packages in new updating db", len(syncset))
+        in_sync_not_db = syncset - dbset
+        logger.info("%d packages in sync not db", len(in_sync_not_db))
+
+        # Try to catch those random package deletions that make Eric so unhappy.
+        if len(dbset):
+            dbpercent = 100.0 * len(syncset) / len(dbset)
+        else:
+            dbpercent = 0.0
+        logger.info("DB package ratio: %.1f%%", dbpercent)
+
+        # Fewer than 20 packages makes the percentage check unreliable, but it
+        # also means we expect the repo to fluctuate a lot.
+        msg = "Package database has %.1f%% the number of packages in the " \
+                "web database" % dbpercent
+        if len(dbset) == 0 and len(syncset) == 0:
+            pass
+        elif not filesonly and \
+                len(dbset) > 20 and dbpercent < 50.0 and \
+                not repository.testing and not repository.staging:
+            logger.error(msg)
+            raise Exception(msg)
+        elif dbpercent < 75.0:
+            logger.warning(msg)
+
+        # If isolation level is repeatable-read, we need to ensure each package
+        # update starts a new transaction and re-queries the database as
+        # necessary to guard against simultaneous updates
+        transaction.commit()
 
     if not filesonly:
         # packages in syncdb and not in database (add to database)
-        for p in [x for x in pkgs if x.name in in_sync_not_db]:
-            logger.info("Adding package %s", p.name)
-            pkg = Package(pkgname=p.name, arch=architecture, repo=repository)
-            score = populate_pkg(pkg, p, timestamp=datetime.utcnow())
-            batcher.batch_commit(score)
+        for pkg in (pkg for pkg in pkgs if pkg.name in in_sync_not_db):
+            logger.info("Adding package %s", pkg.name)
+            dbpkg = Package(pkgname=pkg.name, arch=architecture, repo=repository)
+            try:
+                with transaction.commit_on_success():
+                    populate_pkg(dbpkg, pkg, timestamp=datetime.utcnow())
+            except IntegrityError:
+                logger.warning("Could not add package %s; "
+                        "not fatal if another thread beat us to it.",
+                        pkg.name, exc_info=True)
 
         # packages in database and not in syncdb (remove from database)
-        in_db_not_sync = dbset - syncset
-        for p in in_db_not_sync:
-            logger.info("Removing package %s", p)
-            dbp = dbdict[p]
-            dbp.delete()
-            batcher.batch_commit(1)
+        for pkgname in (dbset - syncset):
+            logger.info("Removing package %s", pkgname)
+            dbpkg = dbdict[pkgname]
+            with transaction.commit_on_success():
+                # no race condition here as long as simultaneous threads both
+                # issue deletes; second delete will be a no-op
+                dbpkg.delete()
 
     # packages in both database and in syncdb (update in database)
     pkg_in_both = syncset & dbset
-    for p in [x for x in pkgs if x.name in pkg_in_both]:
-        logger.debug("Looking for package updates")
-        dbp = dbdict[p.name]
+    for pkg in (x for x in pkgs if x.name in pkg_in_both):
+        logger.debug("Checking package %s", pkg.name)
+        dbpkg = dbdict[pkg.name]
         timestamp = None
         # for a force, we don't want to update the timestamp.
         # for a non-force, we don't want to do anything at all.
         if filesonly:
             pass
-        elif p.ver == dbp.pkgver and p.rel == dbp.pkgrel \
-                and p.epoch == dbp.epoch:
+        elif pkg_same_version(pkg, dbpkg):
             if not force:
                 continue
         else:
             timestamp = datetime.utcnow()
 
+        # The odd select_for_update song and dance here are to ensure
+        # simultaneous updates don't happen on a package, causing
+        # files/depends/all related items to be double-imported.
         if filesonly:
-            logger.debug("Checking files for package %s", p.name)
-            score = populate_files(dbp, p, force=force)
+            with transaction.commit_on_success():
+                # TODO Django 1.4 select_for_update() will work once released
+                dbpkg = select_pkg_for_update(dbpkg)
+                if pkg_same_version(pkg, dbpkg):
+                    logger.debug("Package %s was already updated", pkg.name)
+                    continue
+                logger.debug("Checking files for package %s", pkg.name)
+                populate_files(dbpkg, pkg, force=force)
         else:
-            logger.info("Updating package %s", p.name)
-            score = populate_pkg(dbp, p, force=force, timestamp=timestamp)
-
-        batcher.batch_commit(score)
+            with transaction.commit_on_success():
+                # TODO Django 1.4 select_for_update() will work once released
+                dbpkg = select_pkg_for_update(dbpkg)
+                if pkg_same_version(pkg, dbpkg):
+                    logger.debug("Package %s was already updated", pkg.name)
+                    continue
+                logger.info("Updating package %s", pkg.name)
+                populate_pkg(dbpkg, pkg, force=force, timestamp=timestamp)
 
     logger.info('Finished updating Arch: %s', archname)
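
Note: each add, remove, and update now runs in its own small transaction instead of the old score-based Batcher. A hedged sketch of the add-loop pattern, with a hypothetical helper and a bare save() standing in for populate_pkg():

    from django.db import transaction
    from django.db.utils import IntegrityError

    def add_package_safely(dbpkg):
        """Hypothetical helper mirroring the add loop above."""
        try:
            with transaction.commit_on_success():
                dbpkg.save()  # stands in for populate_pkg(dbpkg, pkg, ...)
        except IntegrityError:
            # a concurrent reporead run inserted the row first; only this
            # package's transaction rolls back and the loop moves on
            pass
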
+ msg = "Package database has %.1f%% the number of packages in the " \ + "web database" % dbpercent + if len(dbset) == 0 and len(syncset) == 0: + pass + elif not filesonly and \ + len(dbset) > 20 and dbpercent < 50.0 and \ + not repository.testing and not repository.staging: + logger.error(msg) + raise Exception(msg) + elif dbpercent < 75.0: + logger.warning(msg) + + # If isolation level is repeatable-read, we need to ensure each package + # update starts a new transaction and re-queries the database as necessary + # to guard against simultaneous updates + transaction.commit() if not filesonly: # packages in syncdb and not in database (add to database) - for p in [x for x in pkgs if x.name in in_sync_not_db]: - logger.info("Adding package %s", p.name) - pkg = Package(pkgname=p.name, arch=architecture, repo=repository) - score = populate_pkg(pkg, p, timestamp=datetime.utcnow()) - batcher.batch_commit(score) + for pkg in (pkg for pkg in pkgs if pkg.name in in_sync_not_db): + logger.info("Adding package %s", pkg.name) + dbpkg = Package(pkgname=pkg.name, arch=architecture, repo=repository) + try: + with transaction.commit_on_success(): + populate_pkg(dbpkg, pkg, timestamp=datetime.utcnow()) + except IntegrityError: + logger.warning("Could not add package %s; " + "not fatal if another thread beat us to it.", + pkg.name, exc_info=True) # packages in database and not in syncdb (remove from database) - in_db_not_sync = dbset - syncset - for p in in_db_not_sync: - logger.info("Removing package %s", p) - dbp = dbdict[p] - dbp.delete() - batcher.batch_commit(1) + for pkgname in (dbset - syncset): + logger.info("Removing package %s", pkgname) + dbpkg = dbdict[pkgname] + with transaction.commit_on_success(): + # no race condition here as long as simultaneous threads both + # issue deletes; second delete will be a no-op + dbpkg.delete() # packages in both database and in syncdb (update in database) pkg_in_both = syncset & dbset - for p in [x for x in pkgs if x.name in pkg_in_both]: - logger.debug("Looking for package updates") - dbp = dbdict[p.name] + for pkg in (x for x in pkgs if x.name in pkg_in_both): + logger.debug("Checking package %s", pkg.name) + dbpkg = dbdict[pkg.name] timestamp = None # for a force, we don't want to update the timestamp. # for a non-force, we don't want to do anything at all. if filesonly: pass - elif p.ver == dbp.pkgver and p.rel == dbp.pkgrel \ - and p.epoch == dbp.epoch: + elif pkg_same_version(pkg, dbpkg): if not force: continue else: timestamp = datetime.utcnow() + # The odd select_for_update song and dance here are to ensure + # simultaneous updates don't happen on a package, causing + # files/depends/all related items to be double-imported. 
diff --git a/devel/management/commands/reporead_inotify.py b/devel/management/commands/reporead_inotify.py
new file mode 100755
index 00000000..c74762eb
--- /dev/null
+++ b/devel/management/commands/reporead_inotify.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+"""
+reporead_inotify command
+
+Watches repo.files.tar.gz files for updates and parses them after a short delay
+in order to catch all updates in a single bulk update.
+
+Usage: ./manage.py reporead_inotify [path_template]
+
+Where 'path_template' is an optional path_template for finding the
+repo.files.tar.gz files. The form is '/srv/ftp/%(repo)s/os/%(arch)s/', which is
+also the default template if none is specified. While 'repo' is not required to
+be present in the path_template, note that 'arch' is so reporead can function
+correctly.
+"""
+
+import logging
+import multiprocessing
+import os
+import pyinotify
+import sys
+import threading
+import time
+
+from django.core.management.base import BaseCommand, CommandError
+from django.db import connection
+
+from main.models import Arch, Repo
+from .reporead import read_repo
+
+logging.basicConfig(
+    level=logging.WARNING,
+    format='%(asctime)s -> %(levelname)s: %(message)s',
+    datefmt='%Y-%m-%d %H:%M:%S',
+    stream=sys.stderr)
+logger = logging.getLogger()
+
+class Command(BaseCommand):
+    help = "Watch database files and run an update when necessary."
+    args = "[path_template]"
+
+    def handle(self, path_template=None, **options):
+        v = int(options.get('verbosity', 0))
+        if v == 0:
+            logger.level = logging.ERROR
+        elif v == 1:
+            logger.level = logging.INFO
+        elif v == 2:
+            logger.level = logging.DEBUG
+
+        if not path_template:
+            path_template = '/srv/ftp/%(repo)s/os/%(arch)s/'
+        self.path_template = path_template
+
+        notifier = self.setup_notifier()
+        logger.info('Entering notifier loop')
+        notifier.loop()
+
+        logger.info('Cancelling remaining threads...')
+        for thread in threading.enumerate():
+            if hasattr(thread, 'cancel'):
+                thread.cancel()
+
+    def setup_notifier(self):
+        '''Set up and configure the inotify machinery and logic.
+        This takes the provided or default path_template and builds a list of
+        directories we need to watch for database updates. It then validates
+        and passes these on to the various pyinotify pieces as necessary and
+        finally builds and returns a notifier object.'''
+        arches = Arch.objects.filter(agnostic=False)
+        repos = Repo.objects.all()
+        arch_path_map = dict((arch, None) for arch in arches)
+        all_paths = set()
+        total_paths = 0
+        for arch in arches:
+            combos = ({ 'repo': repo.name.lower(), 'arch': arch.name }
+                    for repo in repos)
+            # take a python format string and generate all unique combinations
+            # of directories from it; using set() ensures we filter it down
+            paths = set(self.path_template % values for values in combos)
+            total_paths += len(paths)
+            all_paths |= paths
+            arch_path_map[arch] = paths
+
+        logger.info('Watching %d total paths', total_paths)
+        logger.debug(all_paths)
+
+        # sanity check- basically ensure every path we created from the
+        # template mapped to only one architecture
+        if total_paths != len(all_paths):
+            raise CommandError('path template did not uniquely '
+                    'determine architecture for each file')
+
+        # this thread is done using the database; all future access is done in
+        # the spawned read_repo() processes, so close the otherwise completely
+        # idle connection.
+        connection.close()
+
+        # A proper atomic replacement of the database as done by rsync is type
+        # IN_MOVED_TO. repo-add/remove will finish with a IN_CLOSE_WRITE.
+        mask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_TO
+
+        manager = pyinotify.WatchManager()
+        for name in all_paths:
+            manager.add_watch(name, mask)
+
+        handler = EventHandler(arch_paths=arch_path_map)
+        return pyinotify.Notifier(manager, handler)
+
+
+class Database(object):
+    '''A object representing a pacman database on the filesystem. It stores
+    various bits of metadata and state representing the file path, when we last
+    updated, how long our delay is before performing the update, whether we are
+    updating now, etc.'''
+    def __init__(self, arch, path, delay=60.0, nice=3):
+        self.arch = arch
+        self.path = path
+        self.delay = delay
+        self.nice = nice
+        self.mtime = None
+        self.last_import = None
+        self.update_thread = None
+        self.updating = False
+        self.run_again = False
+        self.lock = threading.Lock()
+
+    def _start_update_countdown(self):
+        self.update_thread = threading.Timer(self.delay, self.update)
+        logger.info('Starting %.1f second countdown to update %s',
+                self.delay, self.path)
+        self.update_thread.start()
+
+    def queue_for_update(self, mtime):
+        logger.debug('Queueing database %s...', self.path)
+        with self.lock:
+            self.mtime = mtime
+            if self.updating:
+                # store the fact that we will need to run it again
+                self.run_again = True
+                return
+            if self.update_thread:
+                self.update_thread.cancel()
+                self.update_thread = None
+            self._start_update_countdown()
+
+    def update(self):
+        logger.debug('Updating database %s...', self.path)
+        with self.lock:
+            self.last_import = time.time()
+            self.updating = True
+
+        try:
+            # invoke reporead's primary method. we do this in a separate
+            # process for memory conservation purposes; these processes grow
+            # rather large so it is best to free up the memory ASAP.
+            def run():
+                if self.nice != 0:
+                    os.nice(self.nice)
+                read_repo(self.arch, self.path, {})
+
+            process = multiprocessing.Process(target=run)
+            process.start()
+            process.join()
+        finally:
+            logger.debug('Done updating database %s.', self.path)
+            with self.lock:
+                self.update_thread = None
+                self.updating = False
+                if self.run_again:
+                    self.run_again = False
+                    self._start_update_countdown()
+
+
+class EventHandler(pyinotify.ProcessEvent):
+    '''Our main event handler which listens for database change events. Because
+    we are watching the whole directory, we filter down and only look at those
+    events dealing with files databases.'''
+
+    def my_init(self, **kwargs):
+        self.databases = {}
+        self.arch_lookup = {}
+
+        # we really want a single path to arch mapping, so massage the data
+        arch_paths = kwargs['arch_paths']
+        for arch, paths in arch_paths.items():
+            self.arch_lookup.update((path.rstrip('/'), arch) for path in paths)
+
+    def process_default(self, event):
+        '''Primary event processing function which kicks off reporead timer
+        threads if a files database was updated.'''
+        if not event.name:
+            return
+        # screen to only the files we care about
+        if event.name.endswith('.files.tar.gz'):
+            path = event.pathname
+            stat = os.stat(path)
+            database = self.databases.get(path, None)
+            if database is None:
+                arch = self.arch_lookup.get(event.path, None)
+                if arch is None:
+                    logger.warning(
+                            'Could not determine arch for %s, skipping update',
+                            path)
+                    return
+                database = Database(arch, path)
+                self.databases[path] = database
+            database.queue_for_update(stat.st_mtime)
+
+
+# vim: set ts=4 sw=4 et:
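
Note: setup_notifier() wires pyinotify to the watched directories. A hedged, standalone sketch of the same machinery, with an illustrative path in place of the template expansion:

    import pyinotify

    # rsync replaces databases atomically (IN_MOVED_TO); repo-add writes in
    # place and finishes with IN_CLOSE_WRITE -- the same mask as above.
    mask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_TO

    class Handler(pyinotify.ProcessEvent):
        def process_default(self, event):
            print('changed: %s' % event.pathname)

    manager = pyinotify.WatchManager()
    manager.add_watch('/srv/ftp/core/os/x86_64/', mask)  # illustrative path
    pyinotify.Notifier(manager, Handler()).loop()
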
""" + # always returns an Arch object, regardless of what is passed in + primary_arch = locate_arch(primary_arch) + repo, packages = parse_repo(repo_file) # group packages by arch -- to handle noarch stuff packages_arches = {} for arch in Arch.objects.filter(agnostic=True): packages_arches[arch.name] = [] - packages_arches[primary_arch] = [] + packages_arches[primary_arch.name] = [] for package in packages: if package.arch in packages_arches: packages_arches[package.arch].append(package) else: # we don't include mis-arched packages - logger.warning("Package %s arch = %s", - package.name, package.arch) + logger.warning("Package %s arch = %s", package.name, package.arch) del packages - logger.info('Starting database updates.') + logger.info('Starting database updates for %s.', repo_file) for arch in sorted(packages_arches.keys()): db_update(arch, repo, packages_arches[arch], options) - logger.info('Finished database updates.') + logger.info('Finished database updates for %s.', repo_file) return 0 # vim: set ts=4 sw=4 et: diff --git a/devel/management/commands/reporead_inotify.py b/devel/management/commands/reporead_inotify.py new file mode 100755 index 00000000..c74762eb --- /dev/null +++ b/devel/management/commands/reporead_inotify.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- +""" +reporead_inotify command + +Watches repo.files.tar.gz files for updates and parses them after a short delay +in order to catch all updates in a single bulk update. + +Usage: ./manage.py reporead_inotify [path_template] + +Where 'path_template' is an optional path_template for finding the +repo.files.tar.gz files. The form is '/srv/ftp/%(repo)s/os/%(arch)s/', which is +also the default template if none is specified. While 'repo' is not required to +be present in the path_template, note that 'arch' is so reporead can function +correctly. +""" + +import logging +import multiprocessing +import os +import pyinotify +import sys +import threading +import time + +from django.core.management.base import BaseCommand, CommandError +from django.db import connection + +from main.models import Arch, Repo +from .reporead import read_repo + +logging.basicConfig( + level=logging.WARNING, + format='%(asctime)s -> %(levelname)s: %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + stream=sys.stderr) +logger = logging.getLogger() + +class Command(BaseCommand): + help = "Watch database files and run an update when necessary." + args = "[path_template]" + + def handle(self, path_template=None, **options): + v = int(options.get('verbosity', 0)) + if v == 0: + logger.level = logging.ERROR + elif v == 1: + logger.level = logging.INFO + elif v == 2: + logger.level = logging.DEBUG + + if not path_template: + path_template = '/srv/ftp/%(repo)s/os/%(arch)s/' + self.path_template = path_template + + notifier = self.setup_notifier() + logger.info('Entering notifier loop') + notifier.loop() + + logger.info('Cancelling remaining threads...') + for thread in threading.enumerate(): + if hasattr(thread, 'cancel'): + thread.cancel() + + def setup_notifier(self): + '''Set up and configure the inotify machinery and logic. + This takes the provided or default path_template and builds a list of + directories we need to watch for database updates. 
diff --git a/devel/views.py b/devel/views.py
index 27c32e7b..7cc45419 100644
--- a/devel/views.py
+++ b/devel/views.py
@@ -18,6 +18,7 @@
 from main.models import Package, PackageDepend, PackageFile, TodolistPkg
 from main.models import Arch, Repo
 from main.models import UserProfile
 from packages.models import PackageRelation
+from packages.utils import get_signoff_groups
 from todolists.utils import get_annotated_todolists
 from .utils import get_annotated_maintainers
 
@@ -31,7 +32,12 @@ from string import ascii_letters, digits
 @never_cache
 def index(request):
     '''the developer dashboard'''
-    inner_q = PackageRelation.objects.filter(user=request.user).values('pkgbase')
+    if(request.user.is_authenticated()):
+        inner_q = PackageRelation.objects.filter(user=request.user)
+    else:
+        inner_q = PackageRelation.objects.none()
+    inner_q = inner_q.values('pkgbase')
+
     flagged = Package.objects.normal().filter(
             flag_date__isnull=False, pkgbase__in=inner_q).order_by('pkgname')
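
Note: the anonymous-user branch relies on QuerySet.none() returning an empty queryset that still chains like any other. A hedged sketch using Django's auth User model rather than archweb's models:

    from django.contrib.auth.models import User

    # .none() keeps the full queryset API, so downstream .values()/.filter()
    # calls work unchanged for anonymous visitors.
    empty = User.objects.none().values('username')
    print(list(empty))  # -> []
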
@@ -43,6 +49,9 @@ def index(request):
     todolists = get_annotated_todolists()
     todolists = [todolist for todolist in todolists if todolist.incomplete_count > 0]
 
+    signoffs = sorted(get_signoff_groups(user=request.user),
+            key=operator.attrgetter('pkgbase'))
+
     maintainers = get_annotated_maintainers()
 
     maintained = PackageRelation.objects.filter(
@@ -65,6 +74,7 @@ def index(request):
         'orphan': orphan,
         'flagged' : flagged,
         'todopkgs' : todopkgs,
+        'signoffs': signoffs
     }
     return direct_to_template(request, 'devel/index.html', page_dict)
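
Note: the signoff groups are sorted by attribute with operator.attrgetter. A standalone sketch with throwaway objects standing in for signoff groups:

    import operator

    class Group(object):
        def __init__(self, pkgbase):
            self.pkgbase = pkgbase

    groups = [Group('zlib'), Group('bash')]
    groups = sorted(groups, key=operator.attrgetter('pkgbase'))
    print([g.pkgbase for g in groups])  # -> ['bash', 'zlib']
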
@@ -73,11 +83,11 @@ def index(request):
 @never_cache
 def clock(request):
     devs = User.objects.filter(is_active=True).order_by(
-            'username').select_related('userprofile')
+            'first_name', 'last_name').select_related('userprofile')
 
-    # now annotate each dev object with their current time
     now = datetime.now()
     utc_now = datetime.utcnow().replace(tzinfo=pytz.utc)
+    # now annotate each dev object with their current time
     for dev in devs:
         tz = pytz.timezone(dev.userprofile.time_zone)
         dev.current_time = utc_now.astimezone(tz)
@@ -142,12 +152,12 @@ def report(request, report, username=None):
 
     if report == 'old':
         title = 'Packages last built more than two years ago'
-        cutoff = datetime.now() - timedelta(days=365 * 2)
+        cutoff = datetime.utcnow() - timedelta(days=365 * 2)
         packages = packages.filter(
                 build_date__lt=cutoff).order_by('build_date')
     elif report == 'long-out-of-date':
         title = 'Packages marked out-of-date more than 90 days ago'
-        cutoff = datetime.now() - timedelta(days=90)
+        cutoff = datetime.utcnow() - timedelta(days=90)
         packages = packages.filter(
                 flag_date__lt=cutoff).order_by('flag_date')
     elif report == 'big':
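
Note: the clock view converts an aware UTC timestamp into each developer's local time. A hedged standalone sketch of the same pytz conversion, with an illustrative zone in place of a profile's time_zone value:

    from datetime import datetime
    import pytz

    utc_now = datetime.utcnow().replace(tzinfo=pytz.utc)
    tz = pytz.timezone('Europe/Berlin')  # illustrative time_zone value
    print(utc_now.astimezone(tz))        # developer-local wall clock time
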