Compare commits
3 commits: e2ec325540 ... 47d4e36930
Commits: 47d4e36930, c584950f11, a90522ddb9
@@ -6,6 +6,9 @@ from flask_marshmallow import Marshmallow
 from flask_bcrypt import Bcrypt
 from flask_migrate import Migrate
 from .config import DevelopmentConfig, ProductionConfig, TestingConfig, overlay_config
+from sqlalchemy import event
+from sqlalchemy.engine import Engine
+from sqlite3 import Connection as SQLite3Connection
 
 app = Flask(__name__)
 CORS(app)
@@ -33,6 +36,15 @@ storage = None
 from sachet.storage import FileSystem
 
 
+# https://stackoverflow.com/questions/57726047/
+@event.listens_for(Engine, "connect")
+def _set_sqlite_pragma(dbapi_connection, connection_record):
+    if isinstance(dbapi_connection, SQLite3Connection):
+        cursor = dbapi_connection.cursor()
+        cursor.execute("PRAGMA foreign_keys=ON;")
+        cursor.close()
+
+
 with app.app_context():
     db.create_all()
     if _storage_method == "filesystem":
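Note on the hunk above: SQLite leaves foreign-key enforcement switched off on every new connection, so without this listener the ondelete="CASCADE" this changeset adds to Chunk.upload_id further down would be silently ignored. A minimal sketch with plain sqlite3 (independent of this codebase) showing the effect:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("PRAGMA foreign_keys=ON;")  # off by default in SQLite
conn.execute("CREATE TABLE uploads (id INTEGER PRIMARY KEY)")
conn.execute(
    "CREATE TABLE chunks (id INTEGER PRIMARY KEY,"
    " upload_id INTEGER REFERENCES uploads(id) ON DELETE CASCADE)"
)
conn.execute("INSERT INTO uploads VALUES (1)")
conn.execute("INSERT INTO chunks VALUES (1, 1)")
conn.execute("DELETE FROM uploads WHERE id = 1")
# with the pragma, the chunk row is cascaded away; without it, it would remain
assert conn.execute("SELECT COUNT(*) FROM chunks").fetchone()[0] == 0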
@@ -1,6 +1,6 @@
 import click
 from sachet.server import app, db
-from sachet.server.models import User, Share, Permissions
+from sachet.server.models import User, Share, Permissions, Upload
 from sachet.server.users import manage
 from flask.cli import AppGroup
 from bitmask import Bitmask
@@ -51,15 +51,21 @@ app.cli.add_command(user_cli)
 def cleanup():
     """Clean up stale database entries.
 
-    Shares that are not initialized are deleted if they are older than 25 minutes.
+    Uninitialized shares and unfinished uploads older than a day are deleted.
     """
     res = Share.query.filter(
-        Share.create_date < (datetime.datetime.now() - datetime.timedelta(minutes=25)),
+        Share.create_date < (datetime.datetime.now() - datetime.timedelta(hours=24)),
         # do not use `Share.initialized` or `is False` here
         # sqlalchemy doesn't like it
         # noqa: E712
         Share.initialized == False,
     )
     res.delete()
+
+    res = Upload.query.filter(
+        Upload.create_date < (datetime.datetime.now() - datetime.timedelta(hours=24))
+    )
+    res.delete()
     db.session.commit()
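Why the hunk above keeps `Share.initialized == False` with a noqa instead of `is False`: on a SQLAlchemy column, `==` is overloaded to build a SQL expression, while `is` always evaluates in Python and returns a plain bool that filter() cannot translate. A small sketch with a hypothetical model (not part of sachet):

from sqlalchemy import Boolean, Column, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Demo(Base):  # hypothetical model for illustration only
    __tablename__ = "demo"
    id = Column(Integer, primary_key=True)
    initialized = Column(Boolean)

print(Demo.initialized == False)  # noqa: E712 -- a SQL expression: "demo.initialized = false"
print(Demo.initialized is False)  # plain Python False; useless as a filter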
@@ -1,6 +1,6 @@
 import uuid
 import io
-from flask import Blueprint, request, jsonify, send_file
+from flask import Blueprint, request, jsonify, send_file, make_response
 from flask.views import MethodView
 from sachet.server.models import Share, Permissions, Upload, Chunk
 from sachet.server.views_common import ModelAPI, ModelListAPI, auth_required
@@ -111,7 +111,9 @@ class FileContentAPI(MethodView):
 
         if upload.completed:
             share.initialized = True
-            db.session.delete(upload)
+            # really convoluted
+            # but otherwise it doesn't cascade deletes?
+            Upload.query.filter(Upload.upload_id == upload.upload_id).delete()
             db.session.commit()
             return jsonify(dict(status="success", message="Upload completed.")), 201
         else:
@@ -207,10 +209,16 @@ class FileContentAPI(MethodView):
         )
 
         file = share.get_handle()
-        with file.open(mode="rb") as f:
-            data = f.read()
-
-        return send_file(io.BytesIO(data), download_name=share.file_name)
+        with file.open("rb") as f:
+            resp = make_response(
+                send_file(
+                    io.BytesIO(f.read()),
+                    download_name=share.file_name,
+                    conditional=True,
+                )
+            )
+        return resp
 
 
 files_blueprint.add_url_rule(
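The `conditional=True` flag in the hunk above is what makes Flask's send_file honor Range and If-Range headers (returning 206 or 416), which the new test_partial later in this diff exercises. A hedged, standalone sketch of that behavior, separate from sachet's own views:

import io
from flask import Flask, send_file

demo = Flask(__name__)  # throwaway app for illustration

@demo.route("/blob")
def blob():
    return send_file(
        io.BytesIO(b"0123456789"), download_name="blob.bin", conditional=True
    )

client = demo.test_client()
resp = client.get("/blob", headers={"Range": "bytes=2-5"})
assert resp.status_code == 206  # partial content
assert resp.data == b"2345"     # inclusive on both ends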
@@ -334,9 +334,9 @@ class Upload(db.Model):
 
     chunks = db.relationship(
         "Chunk",
-        backref=db.backref("upload"),
+        backref="upload",
+        passive_deletes=True,
         order_by="Chunk.chunk_id",
         cascade="all, delete",
     )
 
     def __init__(self, upload_id, total_chunks, share_id):
@@ -401,7 +401,9 @@ class Chunk(db.Model):
     chunk_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
     create_date = db.Column(db.DateTime, nullable=False)
     index = db.Column(db.Integer, nullable=False)
-    upload_id = db.Column(db.String, db.ForeignKey("uploads.upload_id"))
+    upload_id = db.Column(
+        db.String, db.ForeignKey("uploads.upload_id", ondelete="CASCADE")
+    )
     filename = db.Column(db.String, nullable=False)
 
     def __init__(self, index, upload_id, total_chunks, share, data):
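Taken together, the two hunks above move chunk cleanup into the database: `passive_deletes=True` tells SQLAlchemy not to load every Chunk and delete it row by row, and the `ondelete="CASCADE"` on the foreign key lets the database drop the rows itself (which is why the SQLite pragma listener earlier in this diff matters). An annotated reading of the relationship; the code is from the hunk, the comments are interpretive:

chunks = db.relationship(
    "Chunk",
    backref="upload",          # plain string backref; the db.backref() wrapper isn't needed here
    passive_deletes=True,      # don't emit per-chunk DELETEs; trust the FK cascade
    order_by="Chunk.chunk_id",
    cascade="all, delete",     # session-level cascade for objects already loaded
)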
@@ -421,9 +423,11 @@ class Chunk(db.Model):
         with file.open(mode="wb") as f:
             f.write(data)
 
-    @classmethod
-    def __declare_last__(cls):
-        @event.listens_for(cls, "before_delete")
-        def chunk_before_delete(mapper, connection, chunk):
+
+@event.listens_for(db.session, "persistent_to_deleted")
+def chunk_delete_listener(session, instance):
+    # kinda hacky but i have no idea how to trigger event listener on cascaded delete
+    if isinstance(instance, Upload):
+        for chunk in instance.chunks:
             file = storage.get_file(chunk.filename)
             file.delete()
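The switch above from a mapper-level before_delete on Chunk to a session-level listener is apparently because before_delete never fires for rows the database removes via the FK cascade; `persistent_to_deleted` fires after a flush for each object the session itself deleted, so the hook reacts to the Upload and cleans up its chunks' backing files instead. A minimal sketch of that session event, assuming nothing sachet-specific:

from sqlalchemy import event
from sqlalchemy.orm import Session

@event.listens_for(Session, "persistent_to_deleted")
def log_delete(session, instance):
    # fires once per object that moved persistent -> deleted in a flush
    print("deleted:", instance)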
@@ -8,14 +8,13 @@ class Storage:
     """
 
     def list_files(self):
-        """Lists all files.
+        """List all files.
 
         Returns
         -------
         list of File
 
         """
-
         pass
 
     def get_file(self, name):
@@ -29,7 +28,6 @@ class Storage:
             Filename to access.
 
         """
-
         pass
 
     class File:
@@ -41,6 +39,8 @@ class Storage:
         ----------
         name : str
             Filename
+        size : int
+            Size in bytes of the file.
         """
 
         def open(self, mode="r"):
@@ -57,12 +57,10 @@ class Storage:
                 Stream to access the file (just like the builtin `open()`.)
 
             """
-
             pass
 
         def delete(self):
             """Delete file."""
-
             pass
 
         def rename(self, new_name):
@@ -73,7 +71,6 @@ class Storage:
             new_name : str
                 New name for the file.
             """
-
             pass
 
 
@@ -55,3 +55,7 @@ class FileSystem(Storage):
                 raise OSError(f"Path {path} already exists.")
 
             self._path.rename(new_path)
+
+        @property
+        def size(self):
+            return self._path.stat().st_size
@@ -3,7 +3,7 @@ from sachet.server.commands import create_user, delete_user, cleanup
 from sqlalchemy import inspect
 from sachet.server import db
 import datetime
-from sachet.server.models import User, Share
+from sachet.server.models import User, Share, Chunk, Upload
 
 
 def test_user(client, cli):
@@ -28,12 +28,12 @@ def test_user(client, cli):
 
 
 def test_cleanup(client, cli):
-    """Test the CLI's ability to destroy uninitialized shares past expiry."""
+    """Test the CLI's ability to destroy stale entries."""
     # create shares
     # this one will be destroyed
     share = Share()
     db.session.add(share)
-    share.create_date = datetime.datetime.now() - datetime.timedelta(minutes=30)
+    share.create_date = datetime.datetime.now() - datetime.timedelta(hours=30)
     destroyed = share.share_id
     # this one won't
     share = Share()
@@ -42,7 +42,7 @@ def test_cleanup(client, cli):
     # this one neither
     share = Share()
     share.initialized = True
-    share.create_date = datetime.datetime.now() - datetime.timedelta(minutes=30)
+    share.create_date = datetime.datetime.now() - datetime.timedelta(hours=30)
     db.session.add(share)
     safe2 = share.share_id
 
@@ -53,3 +53,29 @@ def test_cleanup(client, cli):
     assert Share.query.filter_by(share_id=destroyed).first() is None
     assert Share.query.filter_by(share_id=safe).first() is not None
     assert Share.query.filter_by(share_id=safe2).first() is not None
+
+    # test stale uploads and chunks
+
+    test_share = Share()
+    db.session.add(test_share)
+
+    chk = Chunk(0, "upload1", 1, test_share, b"test_data")
+    chk.upload.create_date = datetime.datetime.now() - datetime.timedelta(hours=30)
+    db.session.add(chk)
+    chk_upload_id = chk.upload.upload_id
+
+    chk_safe = Chunk(0, "upload2", 1, test_share, b"test_data")
+    db.session.add(chk_safe)
+    chk_safe_upload_id = chk_safe.upload.upload_id
+
+    db.session.commit()
+
+    chk_id = chk.chunk_id
+    chk_safe_id = chk_safe.chunk_id
+
+    result = cli.invoke(cleanup)
+    assert result.exit_code == 0
+    assert Chunk.query.filter_by(chunk_id=chk_id).first() is None
+    assert Chunk.query.filter_by(chunk_id=chk_safe_id).first() is not None
+    assert Upload.query.filter_by(upload_id=chk_upload_id).first() is None
+    assert Upload.query.filter_by(upload_id=chk_safe_upload_id).first() is not None
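For context on `cli.invoke(cleanup)` above: the `cli` fixture is presumably Click's test runner (or Flask's app.test_cli_runner()), which runs a command in-process and captures the result rather than shelling out. A hedged sketch of the underlying call:

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cleanup)  # `cleanup` is the command under test
assert result.exit_code == 0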
@@ -2,6 +2,7 @@ import pytest
 from os.path import basename
 from io import BytesIO
+from werkzeug.datastructures import FileStorage
 from sachet.server.models import Upload, Chunk
 from sachet.server import storage
 import uuid
 
@@ -268,3 +269,58 @@ class TestSuite:
             headers=auth("no_lock_user"),
         )
         assert resp.status_code == 403
+
+    def test_partial(self, client, users, auth, rand, upload):
+        # create share
+        resp = client.post(
+            "/files", headers=auth("jeff"), json={"file_name": "content.bin"}
+        )
+        assert resp.status_code == 201
+
+        data = resp.get_json()
+        url = data.get("url")
+
+        upload_data = b"1234567890" * 400
+
+        resp = upload(
+            url + "/content",
+            BytesIO(upload_data),
+            headers=auth("jeff"),
+            chunk_size=1230,
+        )
+        assert resp.status_code == 201
+
+        # test the following ranges
+        ranges = [
+            [0, 1],
+            [1, 1],
+            [2, 300],
+            [300, 30],
+            [3, 4],
+            [30, 3999],
+            [4000, 4000],
+            [3999, 39999],
+            [40000, 0],
+            [48000, 9],
+            [-1, 0],
+            [-2, 3],
+            [0, 4000],
+            [0, ""],
+        ]
+
+        for r in ranges:
+            resp = client.get(
+                url + "/content",
+                headers=auth("jeff", data={"Range": f"bytes={r[0]}-{r[1]}"}),
+            )
+            if r[1] == "":
+                r[1] = len(upload_data)
+            # apparently if you specify an endpoint past the end
+            # it just truncates the response to the end
+            if r[0] < 0 or r[0] >= 4000:
+                assert resp.status_code == 416
+            elif r[0] > r[1]:
+                assert resp.status_code == 416
+            else:
+                assert resp.status_code == 206
+                assert resp.data == upload_data[r[0] : r[1] + 1]
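For reference, the arithmetic these assertions rely on: an HTTP byte range "bytes=START-END" is inclusive on both ends, an END past the end of the body is truncated, and a START the server cannot satisfy yields 416 Range Not Satisfiable. A quick check of the inclusive-slice math, using the same 4000-byte body:

body = b"1234567890" * 400          # 4000 bytes, as in test_partial
start, end = 30, 3999
expected = body[start : end + 1]    # inclusive range -> slice to end + 1
assert len(expected) == end - start + 1  # 3970 bytes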
@@ -10,15 +10,20 @@ from uuid import UUID
 @pytest.mark.parametrize("client", [{"SACHET_STORAGE": "filesystem"}], indirect=True)
 class TestSuite:
     def test_creation(self, client, rand):
-        """Test the process of creating, writing, then reading files, and also listing files."""
+        """Test file pipeline.
+
+        Creating, writing, reading, listing, reading size of files."""
 
-        files = [
-            dict(
-                name=str(UUID(bytes=rand.randbytes(16))),
-                data=rand.randbytes(4000),
-            )
-            for i in range(25)
-        ]
+        files = []
+        for i in range(25):
+            data_size = rand.randint(3000, 5000)
+            files.append(
+                dict(
+                    name=str(UUID(bytes=rand.randbytes(16))),
+                    data_size=data_size,
+                    data=rand.randbytes(data_size),
+                )
+            )
 
         for file in files:
             handle = storage.get_file(file["name"])
@@ -30,6 +35,7 @@ class TestSuite:
         with handle.open(mode="rb") as f:
             saved_data = f.read()
         assert saved_data == file["data"]
+        assert handle.size == file["data_size"]
 
         assert sorted([f.name for f in storage.list_files()]) == sorted(
             [f["name"] for f in files]