purge_queues command now has a warning about the impact of Redis flushall, plus some log output when you run a purge.

David Read 2015-10-21 16:12:40 +00:00
parent 6360681a8f
commit d1f84295f8
2 changed files with 6 additions and 1 deletion


@@ -45,6 +45,8 @@ class Harvester(CkanCommand):
       harvester purge_queues
         - removes all jobs from fetch and gather queue
+        WARNING: if using Redis, this command purges any other data you have
+        in Redis too!
 
       harvester [-j] [-o] [--segments={segments}] import [{source-id}]
         - perform the import stage with the last fetched objects, for a certain
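For context on the new warning: with the redis backend, purge_queues() calls flushall(), which deletes every key in every database of the Redis server, not only the harvest gather and fetch queues. A minimal sketch of the effect, assuming a local Redis instance and the redis-py client (the key name below is made up):

import redis

r = redis.StrictRedis(host='localhost', port=6379)
r.set('someotherapp:session:42', 'important')   # unrelated data sharing the same Redis

r.flushall()                                    # what purge_queues() does for the redis backend

print(r.get('someotherapp:session:42'))         # None: the unrelated key was wiped as well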


@@ -82,10 +82,13 @@ def purge_queues():
     if backend in ('amqp', 'ampq'):
         channel = connection.channel()
         channel.queue_purge(queue=get_gather_queue_name())
+        log.info('AMQP queue purged: %s', get_gather_queue_name())
         channel.queue_purge(queue=get_fetch_queue_name())
+        log.info('AMQP queue purged: %s', get_fetch_queue_name())
         return
     if backend == 'redis':
         connection.flushall()
+        log.info('Redis flushed')
 
 def resubmit_jobs():
     if config.get('ckan.harvest.mq.type') != 'redis':
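The new messages go through the module's log object at INFO level, so they only show up if logging is configured accordingly. A minimal sketch of making them visible, using the standard library logging module; the logger name below is illustrative, not taken from this commit:

import logging

# Show INFO-level messages such as "AMQP queue purged: ..." and "Redis flushed".
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(name)s %(levelname)s %(message)s')

log = logging.getLogger('ckanext.harvest.queue')  # assumed name; the diff only shows 'log'
log.info('Redis flushed')                         # same call as in the redis branch above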
@@ -95,7 +98,7 @@
     for key in harvest_object_pending:
         date_of_key = datetime.datetime.strptime(redis.get(key),
                                                  "%Y-%m-%d %H:%M:%S.%f")
-        if (datetime.datetime.now() - date_of_key).seconds > 180: # 3 minuites for fetch and import max
+        if (datetime.datetime.now() - date_of_key).seconds > 180: # 3 minutes for fetch and import max
             redis.rpush('harvest_object_id',
                         json.dumps({'harvest_object_id': key.split(':')[-1]})
             )
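The surrounding resubmit_jobs() logic parses the timestamp stored for each pending harvest object and resubmits anything older than 180 seconds. A standalone sketch of that staleness test, with a made-up timestamp in the same format:

import datetime

stored = '2015-10-21 16:05:00.000000'   # made-up value in the format used above
date_of_key = datetime.datetime.strptime(stored, "%Y-%m-%d %H:%M:%S.%f")

# .seconds is the seconds component of the timedelta, which is fine for gaps under a day
if (datetime.datetime.now() - date_of_key).seconds > 180:
    print('older than 3 minutes: would be resubmitted')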