Compare commits
3 commits: e2ec325540 ... 47d4e36930

47d4e36930
c584950f11
a90522ddb9
@@ -6,6 +6,9 @@ from flask_marshmallow import Marshmallow
 from flask_bcrypt import Bcrypt
 from flask_migrate import Migrate
 from .config import DevelopmentConfig, ProductionConfig, TestingConfig, overlay_config
+from sqlalchemy import event
+from sqlalchemy.engine import Engine
+from sqlite3 import Connection as SQLite3Connection

 app = Flask(__name__)
 CORS(app)
@@ -33,6 +36,15 @@ storage = None
 from sachet.storage import FileSystem


+# https://stackoverflow.com/questions/57726047/
+@event.listens_for(Engine, "connect")
+def _set_sqlite_pragma(dbapi_connection, connection_record):
+    if isinstance(dbapi_connection, SQLite3Connection):
+        cursor = dbapi_connection.cursor()
+        cursor.execute("PRAGMA foreign_keys=ON;")
+        cursor.close()
+
+
 with app.app_context():
     db.create_all()
     if _storage_method == "filesystem":
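Note: SQLite ships with foreign-key enforcement off, and `PRAGMA foreign_keys=ON` applies only to the connection that issues it, so it has to be replayed on every new DBAPI connection the pool hands out; that is what the `Engine` "connect" listener above does. A minimal standalone sketch of the same pattern (the engine URL is illustrative, not from the patch):

    # Sketch: per-connection PRAGMA with plain SQLAlchemy.
    from sqlalchemy import create_engine, event, text
    from sqlalchemy.engine import Engine
    from sqlite3 import Connection as SQLite3Connection


    @event.listens_for(Engine, "connect")
    def set_sqlite_pragma(dbapi_connection, connection_record):
        # Only SQLite needs this; other backends enforce FKs natively.
        if isinstance(dbapi_connection, SQLite3Connection):
            cursor = dbapi_connection.cursor()
            cursor.execute("PRAGMA foreign_keys=ON;")
            cursor.close()


    engine = create_engine("sqlite:///:memory:")
    with engine.connect() as conn:
        # 1 confirms the pragma took effect on this connection
        assert conn.execute(text("PRAGMA foreign_keys")).scalar() == 1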
@@ -1,6 +1,6 @@
 import click
 from sachet.server import app, db
-from sachet.server.models import User, Share, Permissions
+from sachet.server.models import User, Share, Permissions, Upload
 from sachet.server.users import manage
 from flask.cli import AppGroup
 from bitmask import Bitmask
@@ -51,15 +51,21 @@ app.cli.add_command(user_cli)
 def cleanup():
     """Clean up stale database entries.

-    Shares that are not initialized are deleted if they are older than 25 minutes.
+    Uninitialized shares and unfinished uploads older than a day are deleted.
     """
     res = Share.query.filter(
-        Share.create_date < (datetime.datetime.now() - datetime.timedelta(minutes=25)),
+        Share.create_date < (datetime.datetime.now() - datetime.timedelta(hours=24)),
         # do not use `Share.initialized` or `is False` here
         # sqlalchemy doesn't like it
+        # noqa: E712
         Share.initialized == False,
     )
     res.delete()
+
+    res = Upload.query.filter(
+        Upload.create_date < (datetime.datetime.now() - datetime.timedelta(hours=24))
+    )
+    res.delete()
     db.session.commit()

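Note: the cleanup job is an ordinary Click command on the Flask CLI, so it can be exercised programmatically the same way the tests later in this diff do. A minimal sketch, assuming only the imports the patch itself uses:

    # Sketch: invoke the cleanup command through Flask's Click test runner.
    from sachet.server import app
    from sachet.server.commands import cleanup

    runner = app.test_cli_runner()  # runs commands with the app context set up
    result = runner.invoke(cleanup)
    assert result.exit_code == 0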
@@ -1,6 +1,6 @@
 import uuid
 import io
-from flask import Blueprint, request, jsonify, send_file
+from flask import Blueprint, request, jsonify, send_file, make_response
 from flask.views import MethodView
 from sachet.server.models import Share, Permissions, Upload, Chunk
 from sachet.server.views_common import ModelAPI, ModelListAPI, auth_required
@@ -111,7 +111,9 @@ class FileContentAPI(MethodView):

         if upload.completed:
             share.initialized = True
-            db.session.delete(upload)
+            # really convoluted
+            # but otherwise it doesn't cascade deletes?
+            Upload.query.filter(Upload.upload_id == upload.upload_id).delete()
             db.session.commit()
             return jsonify(dict(status="success", message="Upload completed.")), 201
         else:
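Note: the "convoluted" bulk delete exists because `session.delete()` and `Query.delete()` cascade differently: the former applies ORM-level cascades and fires per-object events, the latter emits a single DELETE statement and leaves cascading to the database. A self-contained sketch of the difference on toy models (not sachet's):

    # Sketch contrasting ORM delete with bulk delete; SQLite in memory.
    from sqlalchemy import create_engine, Column, Integer, ForeignKey
    from sqlalchemy.orm import declarative_base, relationship, Session

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = "parent"
        id = Column(Integer, primary_key=True)
        children = relationship("Child", cascade="all, delete")

    class Child(Base):
        __tablename__ = "child"
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey("parent.id"))

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        p = Parent(children=[Child()])
        session.add(p)
        session.commit()

        # ORM delete: cascade="all, delete" removes the child too.
        session.delete(p)
        session.commit()
        assert session.query(Child).count() == 0

        # Bulk delete: one DELETE statement, ORM cascade is skipped, so the
        # child row is orphaned unless the FK has ondelete="CASCADE".
        p = Parent(children=[Child()])
        session.add(p)
        session.commit()
        session.query(Parent).filter(Parent.id == p.id).delete()
        session.commit()
        assert session.query(Child).count() == 1  # orphan left behind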
@@ -207,10 +209,16 @@ class FileContentAPI(MethodView):
         )

         file = share.get_handle()
-        with file.open(mode="rb") as f:
-            data = f.read()
-
-        return send_file(io.BytesIO(data), download_name=share.file_name)
+        with file.open("rb") as f:
+            resp = make_response(
+                send_file(
+                    io.BytesIO(f.read()),
+                    download_name=share.file_name,
+                    conditional=True,
+                )
+            )
+        return resp


 files_blueprint.add_url_rule(
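Note: `conditional=True` is what turns on werkzeug's Range/ETag handling in `send_file`, so a valid `Range` header gets `206 Partial Content` and an unsatisfiable one gets `416` instead of the whole body. A minimal standalone sketch (the app and route are illustrative, not sachet's):

    # Sketch: Flask's send_file with conditional=True serves byte ranges.
    import io
    from flask import Flask, send_file

    app = Flask(__name__)
    DATA = b"0123456789" * 10  # 100 bytes


    @app.route("/blob")
    def blob():
        return send_file(
            io.BytesIO(DATA),
            download_name="blob.bin",
            conditional=True,  # enables Range / 206 handling
        )


    client = app.test_client()
    resp = client.get("/blob", headers={"Range": "bytes=10-19"})
    assert resp.status_code == 206
    assert resp.data == DATA[10:20]  # bytes 10..19 inclusive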
@@ -334,9 +334,9 @@ class Upload(db.Model):

     chunks = db.relationship(
         "Chunk",
-        backref=db.backref("upload"),
+        backref="upload",
+        passive_deletes=True,
         order_by="Chunk.chunk_id",
-        cascade="all, delete",
     )

     def __init__(self, upload_id, total_chunks, share_id):
@@ -401,7 +401,9 @@ class Chunk(db.Model):
     chunk_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
     create_date = db.Column(db.DateTime, nullable=False)
     index = db.Column(db.Integer, nullable=False)
-    upload_id = db.Column(db.String, db.ForeignKey("uploads.upload_id"))
+    upload_id = db.Column(
+        db.String, db.ForeignKey("uploads.upload_id", ondelete="CASCADE")
+    )
     filename = db.Column(db.String, nullable=False)

     def __init__(self, index, upload_id, total_chunks, share, data):
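Note: `ondelete="CASCADE"` is baked into the table DDL, so the database itself drops chunk rows when their upload goes away; together with `passive_deletes=True` on the relationship above, the ORM stops trying to load chunks and null out their foreign keys first. A sketch showing the emitted DDL on toy tables (names are illustrative):

    # Sketch: the FK change emits ON DELETE CASCADE in the DDL.
    from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
    from sqlalchemy.schema import CreateTable
    from sqlalchemy.dialects import sqlite

    metadata = MetaData()
    uploads = Table(
        "uploads",
        metadata,
        Column("upload_id", String, primary_key=True),
    )
    chunks = Table(
        "chunks",
        metadata,
        Column("chunk_id", Integer, primary_key=True),
        Column("upload_id", String, ForeignKey("uploads.upload_id", ondelete="CASCADE")),
    )
    print(CreateTable(chunks).compile(dialect=sqlite.dialect()))
    # ... FOREIGN KEY(upload_id) REFERENCES uploads (upload_id) ON DELETE CASCADE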
@@ -421,9 +423,11 @@ class Chunk(db.Model):
         with file.open(mode="wb") as f:
             f.write(data)

-    @classmethod
-    def __declare_last__(cls):
-        @event.listens_for(cls, "before_delete")
-        def chunk_before_delete(mapper, connection, chunk):
+
+@event.listens_for(db.session, "persistent_to_deleted")
+def chunk_delete_listener(session, instance):
+    # kinda hacky but i have no idea how to trigger event listener on cascaded delete
+    if isinstance(instance, Upload):
+        for chunk in instance.chunks:
             file = storage.get_file(chunk.filename)
             file.delete()
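Note: the mapper-level `before_delete` event only fires when the ORM deletes objects one by one; bulk query deletes and database-side cascades bypass it, which is why the listener moves to the session-wide `persistent_to_deleted` hook and walks the upload's chunks itself. A toy sketch of that hook (the model is illustrative):

    # Sketch: session-level deletion hook.
    from sqlalchemy import Column, Integer, create_engine, event
    from sqlalchemy.orm import declarative_base, Session

    Base = declarative_base()

    class Widget(Base):
        __tablename__ = "widgets"
        id = Column(Integer, primary_key=True)

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = Session(engine)

    @event.listens_for(session, "persistent_to_deleted")
    def on_deleted(session, instance):
        # Fires after a flush deletes a persistent object -- a good place
        # to clean up external resources such as files on disk.
        print("deleted:", instance)

    w = Widget()
    session.add(w)
    session.commit()
    session.delete(w)
    session.commit()  # triggers the listener during the flush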
@@ -8,14 +8,13 @@ class Storage:
     """

     def list_files(self):
-        """Lists all files.
+        """List all files.

         Returns
         -------
         list of File

         """
-
         pass

     def get_file(self, name):
@@ -29,7 +28,6 @@ class Storage:
             Filename to access.

         """
-
         pass

     class File:
@@ -41,6 +39,8 @@ class Storage:
         ----------
         name : str
            Filename
+        size : int
+            Size in bytes of the file.
         """

         def open(self, mode="r"):
@@ -57,12 +57,10 @@ class Storage:
                Stream to access the file (just like the builtin `open()`.)

            """
-
            pass

        def delete(self):
            """Delete file."""
-
            pass

        def rename(self, new_name):
@@ -73,7 +71,6 @@ class Storage:
            new_name : str
                New name for the file.
            """
-
            pass

@@ -55,3 +55,7 @@ class FileSystem(Storage):
                raise OSError(f"Path {path} already exists.")

            self._path.rename(new_path)
+
+        @property
+        def size(self):
+            return self._path.stat().st_size
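Note: the new `size` property simply defers to `Path.stat().st_size`, so callers can check a handle's size without opening it. Hypothetical usage (the file name is made up; the calls are the ones the test suite uses):

    # Usage sketch for the new size property.
    from sachet.server import storage

    handle = storage.get_file("example.bin")  # hypothetical file name
    with handle.open(mode="wb") as f:
        f.write(b"hello")
    assert handle.size == 5  # backed by Path.stat().st_size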
@@ -3,7 +3,7 @@ from sachet.server.commands import create_user, delete_user, cleanup
 from sqlalchemy import inspect
 from sachet.server import db
 import datetime
-from sachet.server.models import User, Share
+from sachet.server.models import User, Share, Chunk, Upload


 def test_user(client, cli):
@@ -28,12 +28,12 @@ def test_user(client, cli):


 def test_cleanup(client, cli):
-    """Test the CLI's ability to destroy uninitialized shares past expiry."""
+    """Test the CLI's ability to destroy stale entries."""
     # create shares
     # this one will be destroyed
     share = Share()
     db.session.add(share)
-    share.create_date = datetime.datetime.now() - datetime.timedelta(minutes=30)
+    share.create_date = datetime.datetime.now() - datetime.timedelta(hours=30)
     destroyed = share.share_id
     # this one won't
     share = Share()
@@ -42,7 +42,7 @@ def test_cleanup(client, cli):
     # this one neither
     share = Share()
     share.initialized = True
-    share.create_date = datetime.datetime.now() - datetime.timedelta(minutes=30)
+    share.create_date = datetime.datetime.now() - datetime.timedelta(hours=30)
     db.session.add(share)
     safe2 = share.share_id

@@ -53,3 +53,29 @@ def test_cleanup(client, cli):
     assert Share.query.filter_by(share_id=destroyed).first() is None
     assert Share.query.filter_by(share_id=safe).first() is not None
     assert Share.query.filter_by(share_id=safe2).first() is not None
+
+    # test stale uploads and chunks
+
+    test_share = Share()
+    db.session.add(test_share)
+
+    chk = Chunk(0, "upload1", 1, test_share, b"test_data")
+    chk.upload.create_date = datetime.datetime.now() - datetime.timedelta(hours=30)
+    db.session.add(chk)
+    chk_upload_id = chk.upload.upload_id
+
+    chk_safe = Chunk(0, "upload2", 1, test_share, b"test_data")
+    db.session.add(chk_safe)
+    chk_safe_upload_id = chk_safe.upload.upload_id
+
+    db.session.commit()
+
+    chk_id = chk.chunk_id
+    chk_safe_id = chk_safe.chunk_id
+
+    result = cli.invoke(cleanup)
+    assert result.exit_code == 0
+    assert Chunk.query.filter_by(chunk_id=chk_id).first() is None
+    assert Chunk.query.filter_by(chunk_id=chk_safe_id).first() is not None
+    assert Upload.query.filter_by(upload_id=chk_upload_id).first() is None
+    assert Upload.query.filter_by(upload_id=chk_safe_upload_id).first() is not None
@@ -2,6 +2,7 @@ import pytest
 from os.path import basename
 from io import BytesIO
 from werkzeug.datastructures import FileStorage
+from sachet.server.models import Upload, Chunk
 from sachet.server import storage
 import uuid

@@ -268,3 +269,58 @@ class TestSuite:
             headers=auth("no_lock_user"),
         )
         assert resp.status_code == 403
+
+    def test_partial(self, client, users, auth, rand, upload):
+        # create share
+        resp = client.post(
+            "/files", headers=auth("jeff"), json={"file_name": "content.bin"}
+        )
+        assert resp.status_code == 201
+
+        data = resp.get_json()
+        url = data.get("url")
+
+        upload_data = b"1234567890" * 400
+
+        resp = upload(
+            url + "/content",
+            BytesIO(upload_data),
+            headers=auth("jeff"),
+            chunk_size=1230,
+        )
+        assert resp.status_code == 201
+
+        # test the following ranges
+        ranges = [
+            [0, 1],
+            [1, 1],
+            [2, 300],
+            [300, 30],
+            [3, 4],
+            [30, 3999],
+            [4000, 4000],
+            [3999, 39999],
+            [40000, 0],
+            [48000, 9],
+            [-1, 0],
+            [-2, 3],
+            [0, 4000],
+            [0, ""],
+        ]
+
+        for r in ranges:
+            resp = client.get(
+                url + "/content",
+                headers=auth("jeff", data={"Range": f"bytes={r[0]}-{r[1]}"}),
+            )
+            if r[1] == "":
+                r[1] = len(upload_data)
+            # apparently if you specify an endpoint past the end
+            # it just truncates the response to the end
+            if r[0] < 0 or r[0] >= 4000:
+                assert resp.status_code == 416
+            elif r[0] > r[1]:
+                assert resp.status_code == 416
+            else:
+                assert resp.status_code == 206
+                assert resp.data == upload_data[r[0] : r[1] + 1]
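Note: the expected statuses follow HTTP byte-range semantics (RFC 7233): `bytes=start-end` is inclusive on both ends, an end past the last byte is truncated rather than rejected, and a start that is negative or at/after the body length is unsatisfiable; the server under test also answers 416 when start exceeds end. A worked check for the 4000-byte body above:

    # Worked example of the expectations for a 4000-byte body.
    length = 4000
    # bytes=30-3999    -> 206, bytes 30..3999 inclusive (3970 bytes)
    # bytes=3999-39999 -> 206, end truncated to 3999, one byte returned
    # bytes=4000-4000  -> 416, start is past the last valid offset (3999)
    # bytes=300-30     -> 416, start greater than end
    for start, end, expected in [(30, 3999, 206), (3999, 39999, 206),
                                 (4000, 4000, 416), (300, 30, 416)]:
        ok = 0 <= start < length and start <= end
        assert (206 if ok else 416) == expected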
@@ -10,15 +10,20 @@ from uuid import UUID
 @pytest.mark.parametrize("client", [{"SACHET_STORAGE": "filesystem"}], indirect=True)
 class TestSuite:
     def test_creation(self, client, rand):
-        """Test the process of creating, writing, then reading files, and also listing files."""
+        """Test file pipeline.
+
+        Creating, writing, reading, listing, reading size of files."""

-        files = [
-            dict(
-                name=str(UUID(bytes=rand.randbytes(16))),
-                data=rand.randbytes(4000),
-            )
-            for i in range(25)
-        ]
+        files = []
+        for i in range(25):
+            data_size = rand.randint(3000, 5000)
+            files.append(
+                dict(
+                    name=str(UUID(bytes=rand.randbytes(16))),
+                    data_size=data_size,
+                    data=rand.randbytes(data_size),
+                )
+            )

         for file in files:
             handle = storage.get_file(file["name"])
@@ -30,6 +35,7 @@ class TestSuite:
             with handle.open(mode="rb") as f:
                 saved_data = f.read()
             assert saved_data == file["data"]
+            assert handle.size == file["data_size"]

         assert sorted([f.name for f in storage.list_files()]) == sorted(
             [f["name"] for f in files]