teuthology/queue: Single command for queue operations
Makes the same teuthology-queue commands work regardless of the queue backend, Paddles or Beanstalk.

Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
amathuria committed May 4, 2022
1 parent 99bd46f commit 2abb165
Showing 10 changed files with 148 additions and 185 deletions.
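
The gist of the change: there is now a single teuthology-queue console script, and it picks the queue implementation from the configured backend instead of shipping separate teuthology-paddles-queue and teuthology-beanstalk-queue commands. A minimal sketch of the resulting control flow (it condenses the scripts/queue.py diff below; config.backend is read from teuthology's configuration):

# Condensed from the scripts/queue.py change below; needs a teuthology
# checkout and a populated configuration to actually run.
import teuthology.queue.beanstalk
import teuthology.queue.paddles
from teuthology.config import config

def dispatch(args):
    # 'beanstalk' keeps the legacy queue; anything else goes to Paddles.
    if config.backend == 'beanstalk':
        teuthology.queue.beanstalk.main(args)
    else:
        teuthology.queue.paddles.main(args)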
45 changes: 0 additions & 45 deletions scripts/paddles_queue.py

This file was deleted.

15 changes: 12 additions & 3 deletions scripts/queue.py
@@ -1,15 +1,16 @@
import docopt

import teuthology.config
import teuthology.queue.beanstalk
import teuthology.queue.paddles
from teuthology.config import config

doc = """
usage: teuthology-queue -h
       teuthology-queue [-s|-d|-f] -m MACHINE_TYPE
       teuthology-queue [-r] -m MACHINE_TYPE
       teuthology-queue -m MACHINE_TYPE -D PATTERN
       teuthology-queue -p SECONDS [-m MACHINE_TYPE]
       teuthology-queue -p SECONDS [-m MACHINE_TYPE] [-U USER]
       teuthology-queue -m MACHINE_TYPE -P PRIORITY [-U USER|-R RUN_NAME]
List Jobs in queue.
If -D is passed, then jobs with PATTERN in the job name are deleted from the
@@ -29,9 +30,17 @@
  -p, --pause SECONDS       Pause queues for a number of seconds. A value of 0
                            will unpause. If -m is passed, pause that queue,
                            otherwise pause all queues.
  -P, --priority PRIORITY
                            Change priority of queued jobs (only in Paddles queues)
  -U, --user USER           User who owns the jobs
  -R, --run-name RUN_NAME
                            Used to change priority of all jobs in the run.
"""


def main():
    args = docopt.docopt(doc)
    teuthology.queue.main(args)
    if config.backend == 'beanstalk':
        teuthology.queue.beanstalk.main(args)
    else:
        teuthology.queue.paddles.main(args)
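
To make the new options concrete, here is a small standalone docopt demo (not part of the commit; the usage text is trimmed and the argv values are made up) showing how the -P/-U/-R patterns above become the --priority/--user/--run-name keys that the backends read:

import docopt

demo = """
usage: queue-demo -m MACHINE_TYPE -P PRIORITY [-U USER|-R RUN_NAME]

options:
  -m, --machine_type MACHINE_TYPE  Queue (machine type) to operate on
  -P, --priority PRIORITY          New priority for the queued jobs
  -U, --user USER                  User who owns the jobs
  -R, --run-name RUN_NAME          Change priority of every job in this run
"""

# Simulates: teuthology-queue -m smithi -P 50 -R my-test-run
args = docopt.docopt(demo, argv=['-m', 'smithi', '-P', '50', '-R', 'my-test-run'])
print(args['--machine_type'], args['--priority'], args['--run-name'])
# -> smithi 50 my-test-run   (args['--user'] is None)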
3 changes: 1 addition & 2 deletions setup.py
Original file line number Diff line number Diff line change
@@ -126,8 +126,7 @@
'teuthology-results = scripts.results:main',
'teuthology-report = scripts.report:main',
'teuthology-kill = scripts.kill:main',
'teuthology-paddles-queue = scripts.paddles_queue:main',
'teuthology-beanstalk-queue = scripts.beanstalk_queue:main',
'teuthology-queue=scripts.queue:main',
'teuthology-prune-logs = scripts.prune_logs:main',
'teuthology-describe = scripts.describe:main',
'teuthology-reimage = scripts.reimage:main',
5 changes: 5 additions & 0 deletions teuthology/dispatcher/__init__.py
@@ -95,6 +95,8 @@ def main(args):
    if backend == 'beanstalk':
        connection = beanstalk.connect()
        beanstalk.watch_tube(connection, machine_type)
    elif backend == 'paddles':
        report.create_machine_type_queue(machine_type)

    result_proc = None

@@ -131,6 +133,9 @@ def main(args):
        else:
            job = report.get_queued_job(machine_type)
            if job is None:
                if exit_on_empty_queue and not job_procs:
                    log.info("Queue is empty and no supervisor processes running; exiting!")
                    break
                continue
            job = clean_config(job)
            report.try_push_job_info(job, dict(status='running'))
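
For context, the two dispatcher hunks above give the Paddles-backed loop roughly this shape (a condensed sketch, not the actual dispatcher code; the poll back-off and the job_procs bookkeeping are assumptions):

import logging
import time

from teuthology import report

log = logging.getLogger(__name__)

def dispatch_paddles_sketch(machine_type, exit_on_empty_queue=False):
    # Register the machine-type queue with Paddles before polling it.
    report.create_machine_type_queue(machine_type)
    job_procs = set()  # supervisor processes would be tracked here
    while True:
        job = report.get_queued_job(machine_type)
        if job is None:
            if exit_on_empty_queue and not job_procs:
                log.info("Queue is empty and no supervisor processes running; exiting!")
                break
            time.sleep(10)  # assumed back-off; the real loop simply continues
            continue
        # ...hand the job to a supervisor process here...
        report.try_push_job_info(job, dict(status='running'))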
106 changes: 0 additions & 106 deletions teuthology/queue/__init__.py
@@ -1,106 +0,0 @@
import logging
import pprint
import sys
from collections import OrderedDict

from teuthology import report
from teuthology.config import config

log = logging.getLogger(__name__)

def print_progress(index, total, message=None):
    msg = "{m} ".format(m=message) if message else ''
    sys.stderr.write("{msg}{i}/{total}\r".format(
        msg=msg, i=index, total=total))
    sys.stderr.flush()

def end_progress():
    sys.stderr.write('\n')
    sys.stderr.flush()

class JobProcessor(object):
    def __init__(self):
        self.jobs = OrderedDict()

    def add_job(self, job_id, job_config, job_obj=None):
        job_id = str(job_id)

        job_dict = dict(
            index=(len(self.jobs) + 1),
            job_config=job_config,
        )
        if job_obj:
            job_dict['job_obj'] = job_obj
        self.jobs[job_id] = job_dict

        self.process_job(job_id)

    def process_job(self, job_id):
        pass

    def complete(self):
        pass


class JobPrinter(JobProcessor):
    def __init__(self, show_desc=False, full=False):
        super(JobPrinter, self).__init__()
        self.show_desc = show_desc
        self.full = full

    def process_job(self, job_id):
        job_config = self.jobs[job_id]['job_config']
        job_index = self.jobs[job_id]['index']
        job_priority = job_config['priority']
        job_name = job_config['name']
        job_desc = job_config['description']
        print('Job: {i:>4} priority: {pri:>4} {job_name}/{job_id}'.format(
            i=job_index,
            pri=job_priority,
            job_id=job_id,
            job_name=job_name,
        ))
        if self.full:
            pprint.pprint(job_config)
        elif job_desc and self.show_desc:
            for desc in job_desc.split():
                print('\t {}'.format(desc))


class RunPrinter(JobProcessor):
    def __init__(self):
        super(RunPrinter, self).__init__()
        self.runs = list()

    def process_job(self, job_id):
        run = self.jobs[job_id]['job_config']['name']
        if run not in self.runs:
            self.runs.append(run)
            print(run)


class JobDeleter(JobProcessor):
    def __init__(self, pattern):
        self.pattern = pattern
        super(JobDeleter, self).__init__()

    def add_job(self, job_id, job_config, job_obj=None):
        job_name = job_config['name']
        if self.pattern in job_name:
            super(JobDeleter, self).add_job(job_id, job_config, job_obj)

    def process_job(self, job_id):
        job_config = self.jobs[job_id]['job_config']
        job_name = job_config['name']
        print('Deleting {job_name}/{job_id}'.format(
            job_id=job_id,
            job_name=job_name,
        ))
        report.try_delete_jobs(job_name, job_id)


def main(args):
    if config.backend == 'paddles':
        paddles.main(args)
    else:
        beanstalk.main(args)
1 change: 1 addition & 0 deletions teuthology/queue/beanstalk.py
@@ -7,6 +7,7 @@

from teuthology.config import config
from teuthology import report
from teuthology.queue.util import *

log = logging.getLogger(__name__)

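
The star-import added here (and mirrored in paddles.py below) suggests the helpers deleted from teuthology/queue/__init__.py above now live in a shared teuthology/queue/util.py, whose diff is not shown on this page. Assuming that module exports the same classes, driving one of them directly looks like this (the job values are fabricated):

from teuthology.queue.util import JobPrinter  # assumed new home of the helpers

printer = JobPrinter(show_desc=True)
printer.add_job('12345', {
    'priority': 100,
    'name': 'demo-2022-05-04-run',
    'description': 'smoke-test',
})
printer.complete()
# Prints something like:
# Job:    1 priority:  100 demo-2022-05-04-run/12345
#          smoke-test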
35 changes: 16 additions & 19 deletions teuthology/queue/paddles.py
@@ -5,7 +5,7 @@

from teuthology import report
from teuthology.dispatcher import pause_queue

from teuthology.queue.util import *

log = logging.getLogger(__name__)

@@ -14,31 +14,29 @@ def stats_queue(machine_type):
    stats = report.get_queue_stats(machine_type)
    if stats['paused'] is None:
        log.info("%s queue is currently running with %s jobs queued",
                 stats['name'],
                 stats['count'])
                 stats['queue'],
                 stats['queued_jobs'])
    else:
        log.info("%s queue is paused with %s jobs queued",
                 stats['name'],
                 stats['count'])
                 stats['queue'],
                 stats['queued_jobs'])


def update_priority(machine_type, priority, user, run_name=None):
def update_priority(machine_type, priority, run_name=None):
    if run_name is not None:
        jobs = report.get_user_jobs_queue(machine_type, user, run_name)
    else:
        jobs = report.get_user_jobs_queue(machine_type, user)
        jobs = report.get_jobs_by_run(machine_type, run_name)
    for job in jobs:
        job['priority'] = priority
        report.try_push_job_info(job)


def walk_jobs(machine_type, processor, user):
    log.info("Checking paddles queue...")
    job_count = report.get_queue_stats(machine_type)['count']
    job_count = report.get_queue_stats(machine_type)['queued_jobs']

    jobs = report.get_user_jobs_queue(machine_type, user)
    if job_count == 0:
        log.info('No jobs in queue')
        log.info('No jobs in Paddles queue')
        return

    for i in range(1, job_count + 1):
@@ -54,24 +52,23 @@

def main(args):
    machine_type = args['--machine_type']
    #user = args['--user']
    #run_name = args['--run_name']
    #priority = args['--priority']
    user = args['--user']
    run_name = args['--run-name']
    status = args['--status']
    delete = args['--delete']
    runs = args['--runs']
    show_desc = args['--description']
    full = args['--full']
    pause_duration = args['--pause']
    #unpause = args['--unpause']
    #pause_duration = args['--time']
    priority = args['--priority']
    try:
        if status:
            stats_queue(machine_type)
        if pause_duration:
            pause_queue(machine_type, pause, user, pause_duration)
            #else:
            #pause_queue(machine_type, pause, user)
            if not user:
                log.info('Please enter user to pause Paddles queue')
                return
            report.pause_queue(machine_type, user, pause_duration)
        elif priority:
            update_priority(machine_type, priority, run_name)
        elif delete:
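
With the pieces above, the Paddles path can also be driven straight from Python; a hedged example using only functions that appear in this diff (the machine type, user, and run name are placeholders):

from teuthology import report
from teuthology.queue import paddles

# Raise every queued job of one run to priority 50 (Paddles backend only).
paddles.update_priority('smithi', 50, run_name='demo-2022-05-04-run')

# Pause the smithi queue for an hour; as in the CLI change above, a user has
# to be given so the pause is attributable.
report.pause_queue('smithi', 'some-user', 3600)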