/files/: make deletes cascade properly

dogeystamp 2023-05-17 18:40:31 -04:00
parent c584950f11
commit 47d4e36930
Signed by: dogeystamp
GPG Key ID: 7225FE3592EFFA38
6 changed files with 66 additions and 15 deletions

View File

@@ -6,6 +6,9 @@ from flask_marshmallow import Marshmallow
from flask_bcrypt import Bcrypt
from flask_migrate import Migrate
from .config import DevelopmentConfig, ProductionConfig, TestingConfig, overlay_config
+from sqlalchemy import event
+from sqlalchemy.engine import Engine
+from sqlite3 import Connection as SQLite3Connection
app = Flask(__name__)
CORS(app)
@@ -33,6 +36,15 @@ storage = None
from sachet.storage import FileSystem
+# https://stackoverflow.com/questions/57726047/
+@event.listens_for(Engine, "connect")
+def _set_sqlite_pragma(dbapi_connection, connection_record):
+if isinstance(dbapi_connection, SQLite3Connection):
+cursor = dbapi_connection.cursor()
+cursor.execute("PRAGMA foreign_keys=ON;")
+cursor.close()
with app.app_context():
db.create_all()
if _storage_method == "filesystem":
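
A note on the pragma above: SQLite only enforces foreign keys (and therefore ON DELETE CASCADE) when `PRAGMA foreign_keys=ON` is issued on each connection, which is what the listener does for every connection the engine opens. A minimal standalone sketch of the behaviour, using plain sqlite3 rather than sachet's models (table names here are illustrative):

import sqlite3

# Illustrative demo: without the pragma, SQLite silently ignores
# ON DELETE CASCADE; with it, deleting a parent removes its children.
con = sqlite3.connect(":memory:")
con.execute("PRAGMA foreign_keys=ON;")  # same pragma the listener sets
con.execute("CREATE TABLE uploads (upload_id TEXT PRIMARY KEY)")
con.execute(
    "CREATE TABLE chunks ("
    "chunk_id INTEGER PRIMARY KEY, "
    "upload_id TEXT REFERENCES uploads(upload_id) ON DELETE CASCADE)"
)
con.execute("INSERT INTO uploads VALUES ('u1')")
con.execute("INSERT INTO chunks (upload_id) VALUES ('u1')")
con.execute("DELETE FROM uploads WHERE upload_id = 'u1'")
print(con.execute("SELECT COUNT(*) FROM chunks").fetchone()[0])  # 0 with the pragma, 1 without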

View File

@@ -1,6 +1,6 @@
import click
from sachet.server import app, db
-from sachet.server.models import User, Share, Permissions
+from sachet.server.models import User, Share, Permissions, Upload
from sachet.server.users import manage
from flask.cli import AppGroup
from bitmask import Bitmask
@@ -51,15 +51,21 @@ app.cli.add_command(user_cli)
def cleanup():
"""Clean up stale database entries.
-Shares that are not initialized are deleted if they are older than 25 minutes.
+Uninitialized shares and unfinished uploads older than a day are deleted.
"""
res = Share.query.filter(
-Share.create_date < (datetime.datetime.now() - datetime.timedelta(minutes=25)),
+Share.create_date < (datetime.datetime.now() - datetime.timedelta(hours=24)),
# do not use `Share.initialized` or `is False` here
# sqlalchemy doesn't like it
# noqa: E712
Share.initialized == False,
)
res.delete()
+res = Upload.query.filter(
+Upload.create_date < (datetime.datetime.now() - datetime.timedelta(hours=24))
+)
+res.delete()
db.session.commit()
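
For context on the `noqa: E712` comment above: SQLAlchemy overloads `==` on mapped columns to produce SQL expressions, so `Share.initialized == False` compiles into a WHERE clause, whereas `Share.initialized is False` is evaluated by Python on the spot and simply yields `False`. A rough sketch of the difference with a stand-in model (not sachet's real one):

from sqlalchemy import Boolean, Column, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Share(Base):  # illustrative stand-in, not sachet's Share model
    __tablename__ = "shares"
    id = Column(Integer, primary_key=True)
    initialized = Column(Boolean, default=False)

# `==` is overloaded: this builds a SQL expression usable in .filter()
expr = Share.initialized == False  # noqa: E712
print(type(expr))  # a SQLAlchemy BinaryExpression, not a bool

# `is` cannot be overloaded: this is evaluated immediately by Python
print(Share.initialized is False)  # just False, useless as a filter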

View File

@@ -111,7 +111,9 @@ class FileContentAPI(MethodView):
if upload.completed:
share.initialized = True
-db.session.delete(upload)
+# really convoluted
+# but otherwise it doesn't cascade deletes?
+Upload.query.filter(Upload.upload_id == upload.upload_id).delete()
db.session.commit()
return jsonify(dict(status="success", message="Upload completed.")), 201
else:
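
The comment above points at a general SQLAlchemy distinction: `db.session.delete(obj)` goes through the ORM unit of work (applying the relationship's cascade settings), while `Query.delete()` emits a single bulk DELETE statement that skips ORM cascade handling and leans entirely on the database's `ON DELETE CASCADE`. A hedged sketch of the two paths with generic parent/child models (not sachet's Upload/Chunk):

from sqlalchemy import Column, ForeignKey, Integer, create_engine, event
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

class Parent(Base):  # illustrative models only
    __tablename__ = "parents"
    id = Column(Integer, primary_key=True)
    children = relationship("Child", cascade="all, delete", passive_deletes=True)

class Child(Base):
    __tablename__ = "children"
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey("parents.id", ondelete="CASCADE"))

engine = create_engine("sqlite://")

@event.listens_for(engine, "connect")
def _fk_on(dbapi_connection, connection_record):
    dbapi_connection.execute("PRAGMA foreign_keys=ON;")

Base.metadata.create_all(engine)

with Session(engine) as session:
    p1, p2 = Parent(children=[Child()]), Parent(children=[Child()])
    session.add_all([p1, p2])
    session.commit()

    # ORM path: unit-of-work delete, relationship cascade settings apply.
    session.delete(p1)
    session.commit()

    # Bulk path: one DELETE statement, ORM cascades are bypassed; the rows
    # only disappear because of the database-level ON DELETE CASCADE above.
    session.query(Parent).filter(Parent.id == p2.id).delete()
    session.commit()

    print(session.query(Child).count())  # 0 if both cascades worked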

View File

@@ -334,9 +334,9 @@ class Upload(db.Model):
chunks = db.relationship(
"Chunk",
-backref=db.backref("upload"),
+backref="upload",
passive_deletes=True,
order_by="Chunk.chunk_id",
cascade="all, delete",
)
def __init__(self, upload_id, total_chunks, share_id):
@@ -401,7 +401,9 @@ class Chunk(db.Model):
chunk_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
create_date = db.Column(db.DateTime, nullable=False)
index = db.Column(db.Integer, nullable=False)
-upload_id = db.Column(db.String, db.ForeignKey("uploads.upload_id"))
+upload_id = db.Column(
+db.String, db.ForeignKey("uploads.upload_id", ondelete="CASCADE")
+)
filename = db.Column(db.String, nullable=False)
def __init__(self, index, upload_id, total_chunks, share, data):
@@ -421,9 +423,11 @@
with file.open(mode="wb") as f:
f.write(data)
-@classmethod
-def __declare_last__(cls):
-@event.listens_for(cls, "before_delete")
-def chunk_before_delete(mapper, connection, chunk):
+@event.listens_for(db.session, "persistent_to_deleted")
+def chunk_delete_listener(session, instance):
+# kinda hacky but i have no idea how to trigger event listener on cascaded delete
+if isinstance(instance, Upload):
+for chunk in instance.chunks:
file = storage.get_file(chunk.filename)
file.delete()
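
The listener above relies on SQLAlchemy's `persistent_to_deleted` session event, which fires during a flush whenever a persistent object becomes deleted; rows removed purely by the database's cascade never pass through the session, which is why the hook watches the parent Upload and removes its chunks' files itself. A minimal sketch of the event in isolation (stand-in model, not sachet code):

from sqlalchemy import Column, Integer, create_engine, event
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Upload(Base):  # stand-in sharing only the name with sachet's model
    __tablename__ = "uploads"
    id = Column(Integer, primary_key=True)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = Session(engine)

@event.listens_for(session, "persistent_to_deleted")
def _on_deleted(session, instance):
    # Fires when a flush turns a persistent object into a deleted one,
    # whether it was deleted directly or swept up by an ORM-level cascade.
    print("deleted:", instance)

u = Upload()
session.add(u)
session.commit()
session.delete(u)
session.commit()  # the listener prints here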

View File

@@ -3,7 +3,7 @@ from sachet.server.commands import create_user, delete_user, cleanup
from sqlalchemy import inspect
from sachet.server import db
import datetime
-from sachet.server.models import User, Share
+from sachet.server.models import User, Share, Chunk, Upload
def test_user(client, cli):
@@ -28,12 +28,12 @@ def test_user(client, cli):
def test_cleanup(client, cli):
"""Test the CLI's ability to destroy uninitialized shares past expiry."""
"""Test the CLI's ability to destroy stale entries."""
# create shares
# this one will be destroyed
share = Share()
db.session.add(share)
-share.create_date = datetime.datetime.now() - datetime.timedelta(minutes=30)
+share.create_date = datetime.datetime.now() - datetime.timedelta(hours=30)
destroyed = share.share_id
# this one won't
share = Share()
@@ -42,7 +42,7 @@ def test_cleanup(client, cli):
# this one neither
share = Share()
share.initialized = True
-share.create_date = datetime.datetime.now() - datetime.timedelta(minutes=30)
+share.create_date = datetime.datetime.now() - datetime.timedelta(hours=30)
db.session.add(share)
safe2 = share.share_id
@@ -53,3 +53,29 @@
assert Share.query.filter_by(share_id=destroyed).first() is None
assert Share.query.filter_by(share_id=safe).first() is not None
assert Share.query.filter_by(share_id=safe2).first() is not None
+# test stale uploads and chunks
+test_share = Share()
+db.session.add(test_share)
+chk = Chunk(0, "upload1", 1, test_share, b"test_data")
+chk.upload.create_date = datetime.datetime.now() - datetime.timedelta(hours=30)
+db.session.add(chk)
+chk_upload_id = chk.upload.upload_id
+chk_safe = Chunk(0, "upload2", 1, test_share, b"test_data")
+db.session.add(chk_safe)
+chk_safe_upload_id = chk_safe.upload.upload_id
+db.session.commit()
+chk_id = chk.chunk_id
+chk_safe_id = chk_safe.chunk_id
+result = cli.invoke(cleanup)
+assert result.exit_code == 0
+assert Chunk.query.filter_by(chunk_id=chk_id).first() is None
+assert Chunk.query.filter_by(chunk_id=chk_safe_id).first() is not None
+assert Upload.query.filter_by(upload_id=chk_upload_id).first() is None
+assert Upload.query.filter_by(upload_id=chk_safe_upload_id).first() is not None

View File

@@ -2,6 +2,7 @@ import pytest
from os.path import basename
from io import BytesIO
from werkzeug.datastructures import FileStorage
+from sachet.server.models import Upload, Chunk
from sachet.server import storage
import uuid