fix: retry transactions that fail due to deadlocks

In my opinion, this kind of transaction handling is pretty ugly.
That being said, we have issues with running into deadlocks on aur.al,
so this commit works around that immediate bug.

An ideal solution would be to handle retries inside the `db.begin()`
scope itself, so we wouldn't have to explicitly annotate functions as
"retry functions," which is the approach this commit takes.

Closes #376

Signed-off-by: Kevin Morris <kevr@0cost.org>

@@ -161,6 +161,46 @@ def begin():
     return get_session().begin()
 
 
+def retry_deadlock(func):
+    from sqlalchemy.exc import OperationalError
+
+    def wrapper(*args, _i: int = 0, **kwargs):
+        # Retry 10 times, then raise the exception
+        # If we fail before the 10th, recurse into `wrapper`
+        # If we fail on the 10th, continue to throw the exception
+        limit = 10
+        try:
+            return func(*args, **kwargs)
+        except OperationalError as exc:
+            if _i < limit and "Deadlock found" in str(exc):
+                # Retry on deadlock by recursing into `wrapper`
+                return wrapper(*args, _i=_i + 1, **kwargs)
+            # Otherwise, just raise the exception
+            raise exc
+
+    return wrapper
+
+
+def async_retry_deadlock(func):
+    from sqlalchemy.exc import OperationalError
+
+    async def wrapper(*args, _i: int = 0, **kwargs):
+        # Retry 10 times, then raise the exception
+        # If we fail before the 10th, recurse into `wrapper`
+        # If we fail on the 10th, continue to throw the exception
+        limit = 10
+        try:
+            return await func(*args, **kwargs)
+        except OperationalError as exc:
+            if _i < limit and "Deadlock found" in str(exc):
+                # Retry on deadlock by recursing into `wrapper`
+                return await wrapper(*args, _i=_i + 1, **kwargs)
+            # Otherwise, just raise the exception
+            raise exc
+
+    return wrapper
+
+
 def get_sqlalchemy_url():
     """
     Build an SQLAlchemy URL for use with create_engine.
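
For reference, here is how a write path would opt into the retry behavior.
This is a minimal sketch, not code from this commit: `update_comment` and
the `Comments` attribute are assumed names used purely for illustration.

    from aurweb import db

    @db.retry_deadlock
    def update_comment(comment, body: str) -> None:
        # Hypothetical usage: the decorator re-invokes this function when
        # MySQL reports "Deadlock found", so the body must be safe to run
        # more than once; each attempt opens a fresh transaction.
        with db.begin():
            comment.Comments = body

Passing the attempt counter through the `_i` keyword, rather than storing it
on the decorator, keeps the wrapper stateless, so concurrent calls each track
their own retry depth.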