# NOTE(review): these look like class-level attributes of a Celery Task
# subclass (the enclosing `class ...(Task):` header is not visible in this
# extract — confirm against the full file).
name = "feed.import"
routing_key = 'feed.import'
# Don't store the task's return value in the result backend.
ignore_result = True
default_retry_delay = 5 * 60 # retry in 5 minutes
max_retries = 72 # 6 Hours to cover major outages
def run(self, podcast_id, **kwargs):
    # NOTE(review): slide-extracted fragment of a Celery task's run()
    # method — the `try:` statement, the actual feed-import call, and the
    # body of the `except` clause are missing from this view.  The orphaned
    # `except Exception, exc:` below (Python 2 syntax) is kept verbatim.
    logger = self.get_logger(**kwargs)
    # The cache key consists of the task name and the MD5 digest of the feed id.
    # NOTE(review): the key visibly uses the raw podcast_id, not an MD5
    # digest — the comment above may be stale; confirm.
    lock_id = "%s-lock-%s" % (self.name, podcast_id)
    # Cache-based mutex so only one worker imports a given feed at a time.
    is_locked = lambda: str(cache.get(lock_id)) == "true"
    # 300-second expiry acts as a safety timeout if the worker dies.
    acquire_lock = lambda: cache.set(lock_id, "true", 300)
    # memcache delete is very slow, so we'd rather set a false value
    # with a very low expiry time.
    release_lock = lambda: cache.set(lock_id, "nil", 1)
    logger.debug("Trying to import feed: %s" % podcast_id)
    logger.debug("Feed %s is already being imported by another worker" % podcast_id)
    except Exception, exc:
• running out of disk space
• queue priorities — difficult to implement
• non-pickle-able errors
• crashing consumers
• tasksets / callbacks
• remote control tasks
• abortable tasks
• eta – run tasks at a set time
• expiring tasks
• ajax views