Compare commits

8 commits

18 changed files with 528 additions and 291 deletions

@@ -9,7 +9,8 @@ const services = struct {
const communities = @import("./services/communities.zig");
const actors = @import("./services/actors.zig");
const auth = @import("./services/auth.zig");
const drive = @import("./services/files.zig");
const drive = @import("./services/drive.zig");
const files = @import("./services/files.zig");
const invites = @import("./services/invites.zig");
const notes = @import("./services/notes.zig");
const follows = @import("./services/follows.zig");
@@ -139,12 +140,30 @@ pub const FollowingQueryResult = FollowQueryResult;
pub const UploadFileArgs = struct {
filename: []const u8,
dir: ?[]const u8,
dir: []const u8,
description: ?[]const u8,
content_type: []const u8,
sensitive: bool,
};
pub const DriveEntry = services.drive.DriveEntry;
pub const FileUpload = services.files.FileUpload;
pub const DriveGetResult = union(services.drive.Kind) {
dir: struct {
entry: DriveEntry,
children: []DriveEntry,
},
file: struct {
entry: DriveEntry,
file: FileUpload,
},
};
pub const FileResult = struct {
meta: services.files.FileUpload,
data: []const u8,
};
pub fn isAdminSetup(db: sql.Db) !bool {
_ = services.communities.adminCommunityId(db) catch |err| switch (err) {
error.NotFound => return false,
@@ -519,22 +538,98 @@ fn ApiConn(comptime DbConn: type) type {
);
}
pub fn uploadFile(self: *Self, meta: UploadFileArgs, body: []const u8) !void {
pub fn driveUpload(self: *Self, meta: UploadFileArgs, body: []const u8) !void {
const user_id = self.user_id orelse return error.NoToken;
return try services.drive.createFile(self.db, .{
.dir = meta.dir orelse "/",
const file_id = try services.files.create(self.db, user_id, .{
.filename = meta.filename,
.owner = .{ .user_id = user_id },
.created_by = user_id,
.description = meta.description,
.content_type = meta.content_type,
.sensitive = meta.sensitive,
}, body, self.allocator);
errdefer services.files.delete(self.db, file_id, self.allocator) catch |err| {
std.log.err("Unable to delete file {}: {}", .{ file_id, err });
};
services.drive.create(self.db, user_id, meta.dir, meta.filename, file_id, self.allocator) catch |err| switch (err) {
error.PathAlreadyExists => {
var buf: [256]u8 = undefined;
var split = std.mem.splitBackwards(u8, meta.filename, ".");
const ext = split.first();
const name = split.rest();
const new_name = try std.fmt.bufPrint(&buf, "{s}.{s}.{s}", .{ name, file_id, ext });
try services.drive.create(
self.db,
user_id,
meta.dir,
new_name,
file_id,
self.allocator,
);
},
else => |e| return e,
};
}
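// Collision sketch (illustrative, not part of the diff): if "photo.png" already
// exists in the target dir, splitBackwards yields ext = "png" and name = "photo",
// so the retry inserts "photo.<file-id>.png" and the extension stays last.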
pub fn driveMkdir(self: *Self, path: []const u8) !void {
const user_id = self.user_id orelse return error.NoToken;
try services.drive.mkdir(self.db, .{ .user_id = user_id }, path, self.allocator);
var split = std.mem.splitBackwards(u8, path, "/");
std.log.debug("{s}", .{path});
const base = split.first();
const dir = split.rest();
try services.drive.create(self.db, user_id, dir, base, null, self.allocator);
}
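// Split sketch (illustrative): a path of "pics/cats" yields base = "cats" and
// dir = "pics"; a top-level "pics" yields base = "pics" and dir = "" (the root).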
pub fn driveDelete(self: *Self, path: []const u8) !void {
const user_id = self.user_id orelse return error.NoToken;
const entry = try services.drive.stat(self.db, user_id, path, self.allocator);
defer util.deepFree(self.allocator, entry);
try services.drive.delete(self.db, entry.id, self.allocator);
if (entry.file_id) |file_id| try services.files.delete(self.db, file_id, self.allocator);
}
pub fn driveMove(self: *Self, src: []const u8, dest: []const u8) !void {
const user_id = self.user_id orelse return error.NoToken;
try services.drive.move(self.db, user_id, src, dest, self.allocator);
}
pub fn driveGet(self: *Self, path: []const u8) !DriveGetResult {
const user_id = self.user_id orelse return error.NoToken;
const entry = try services.drive.stat(self.db, user_id, path, self.allocator);
errdefer util.deepFree(self.allocator, entry);
if (entry.file_id) |file_id| return .{
.file = .{
.entry = entry,
.file = try services.files.get(self.db, file_id, self.allocator),
},
} else return .{
.dir = .{
.entry = entry,
.children = try services.drive.list(self.db, entry.id, self.allocator),
},
};
}
pub fn driveUpdate(self: *Self, path: []const u8, meta: services.files.PartialMeta) !void {
const user_id = self.user_id orelse return error.NoToken;
std.log.debug("{s}", .{path});
const entry = try services.drive.stat(self.db, user_id, path, self.allocator);
errdefer util.deepFree(self.allocator, entry);
std.log.debug("{}", .{entry.id});
try services.files.update(self.db, entry.file_id orelse return error.NotAFile, meta, self.allocator);
}
pub fn fileDereference(self: *Self, id: Uuid) !FileResult {
const meta = try services.files.get(self.db, id, self.allocator);
errdefer util.deepFree(self.allocator, meta);
return FileResult{
.meta = meta,
.data = try services.files.deref(self.allocator, id),
};
}
};
}

@@ -55,6 +55,10 @@ pub fn register(
.hash = hash,
.changed_at = DateTime.now(),
}, alloc) catch return error.DatabaseFailure;
tx.insert("drive_entry", .{
.id = id,
.owner_id = id,
}, alloc) catch return error.DatabaseFailure;
tx.commitOrRelease() catch return error.DatabaseFailure;

@@ -3,6 +3,7 @@ const builtin = @import("builtin");
const util = @import("util");
const sql = @import("sql");
const common = @import("./common.zig");
const actors = @import("./actors.zig");
const Uuid = util.Uuid;
const DateTime = util.DateTime;
@@ -38,11 +39,10 @@ pub const CreateOptions = struct {
};
pub const CreateError = error{
DatabaseFailure,
UnsupportedScheme,
InvalidOrigin,
CommunityExists,
};
} || sql.DatabaseError;
pub fn create(db: anytype, origin: []const u8, options: CreateOptions, alloc: std.mem.Allocator) CreateError!Uuid {
const scheme_len = std.mem.indexOfScalar(u8, origin, ':') orelse return error.InvalidOrigin;
@@ -71,7 +71,9 @@ pub fn create(db: anytype, origin: []const u8, options: CreateOptions, alloc: st
const id = Uuid.randV4(util.getThreadPrng());
// TODO: wrap this in TX
if (db.queryRow(
var tx = try db.beginOrSavepoint();
errdefer tx.rollback();
if (tx.queryRow(
std.meta.Tuple(&.{Uuid}),
"SELECT id FROM community WHERE host = $1",
.{host},
@@ -80,11 +82,11 @@ pub fn create(db: anytype, origin: []const u8, options: CreateOptions, alloc: st
return error.CommunityExists;
} else |err| switch (err) {
error.NoRows => {},
else => return error.DatabaseFailure,
else => |e| return e,
}
const name = options.name orelse host;
db.insert("community", .{
try tx.insert("community", .{
.id = id,
.owner_id = null,
.host = host,
@@ -92,8 +94,26 @@ pub fn create(db: anytype, origin: []const u8, options: CreateOptions, alloc: st
.scheme = scheme,
.kind = options.kind,
.created_at = DateTime.now(),
}, alloc) catch return error.DatabaseFailure;
}, alloc);
if (options.kind == .local) {
const actor_id = actors.create(tx, "community.actor", id, alloc) catch |err| switch (err) {
error.UsernameContainsInvalidChar,
error.UsernameTooLong,
error.UsernameEmpty,
error.UsernameTaken,
=> unreachable,
else => @panic("TODO"),
};
try tx.exec(
\\UPDATE community
\\SET community_actor_id = $1
\\WHERE id = $2
\\LIMIT 1
, .{ actor_id, id }, alloc);
}
try tx.commitOrRelease();
return id;
}

src/api/services/drive.zig (new file, 129 lines)

@@ -0,0 +1,129 @@
const std = @import("std");
const util = @import("util");
const sql = @import("sql");
const Uuid = util.Uuid;
const DateTime = util.DateTime;
pub const DriveOwner = union(enum) {
user_id: Uuid,
community_id: Uuid,
};
pub const DriveEntry = struct {
id: Uuid,
owner_id: Uuid,
name: ?[]const u8,
path: []const u8,
parent_directory_id: ?Uuid,
file_id: ?Uuid,
kind: Kind,
};
pub const Kind = enum {
dir,
file,
pub const jsonStringify = util.jsonSerializeEnumAsString;
};
pub fn stat(db: anytype, owner: Uuid, path: []const u8, alloc: std.mem.Allocator) !DriveEntry {
return (db.queryRow(DriveEntry,
\\SELECT id, path, owner_id, name, file_id, kind, parent_directory_id
\\FROM drive_entry_path
\\WHERE owner_id = $1 AND path = ('/' || $2)
\\LIMIT 1
, .{
owner,
std.mem.trim(u8, path, "/"),
}, alloc) catch |err| switch (err) {
error.NoRows => return error.NotFound,
else => |e| return e,
});
}
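// Normalization sketch (illustrative): stat trims slashes and the query prepends
// '/', so "docs/cat.png", "/docs/cat.png", and "docs/cat.png/" all match the view
// path '/docs/cat.png', and the empty string resolves to the root path '/'.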
/// Creates a file or directory
pub fn create(db: anytype, owner: Uuid, dir: []const u8, name: []const u8, file_id: ?Uuid, alloc: std.mem.Allocator) !void {
if (name.len == 0) return error.EmptyName;
const id = Uuid.randV4(util.getThreadPrng());
const tx = try db.begin();
errdefer tx.rollback();
const parent = try stat(tx, owner, dir, alloc);
defer util.deepFree(alloc, parent);
tx.insert("drive_entry", .{
.id = id,
.owner_id = owner,
.name = name,
.parent_directory_id = parent.id,
.file_id = file_id,
}, alloc) catch |err| switch (err) {
error.UniqueViolation => return error.PathAlreadyExists,
else => |e| return e,
};
try tx.commit();
}
pub fn delete(db: anytype, id: Uuid, alloc: std.mem.Allocator) !void {
const tx = try db.beginOrSavepoint();
errdefer tx.rollback();
if ((try tx.queryRow(
std.meta.Tuple(&.{usize}),
\\SELECT COUNT(1)
\\FROM drive_entry
\\WHERE parent_directory_id = $1
,
.{id},
alloc,
))[0] != 0) {
return error.DirectoryNotEmpty;
}
try tx.exec("DELETE FROM drive_entry WHERE id = $1", .{id}, alloc);
try tx.commitOrRelease();
}
pub fn move(db: anytype, owner: Uuid, src: []const u8, dest: []const u8, alloc: std.mem.Allocator) !void {
const tx = try db.beginOrSavepoint();
errdefer tx.rollback();
const val = try stat(tx, owner, src, alloc);
defer util.deepFree(alloc, val);
if (val.parent_directory_id == null) return error.RootDirectory;
var split = std.mem.splitBackwards(u8, std.mem.trim(u8, dest, "/"), "/");
const name = split.first();
const dir = split.rest();
const parent = try stat(tx, owner, dir, alloc);
defer util.deepFree(alloc, parent);
try tx.exec(
\\UPDATE drive_entry
\\SET name = $1, parent_directory_id = $2
\\WHERE id = $3
,
.{ name, parent.id, val.id },
alloc,
);
try tx.commitOrRelease();
}
// TODO: paginate this
pub fn list(db: anytype, id: Uuid, alloc: std.mem.Allocator) ![]DriveEntry {
return (db.queryRows(DriveEntry,
\\SELECT id, path, owner_id, name, file_id, kind, parent_directory_id
\\FROM drive_entry_path
\\WHERE parent_directory_id = $1
, .{id}, null, alloc) catch |err| switch (err) {
error.NoRows => return error.NotFound,
else => |e| return e,
});
}

@@ -1,203 +1,156 @@
const std = @import("std");
const sql = @import("sql");
const util = @import("util");
const Uuid = util.Uuid;
const DateTime = util.DateTime;
pub const FileOwner = union(enum) {
user_id: Uuid,
community_id: Uuid,
pub const FileStatus = enum {
uploading,
uploaded,
external,
deleted,
pub const jsonStringify = util.jsonSerializeEnumAsString;
};
pub const DriveFile = struct {
pub const FileUpload = struct {
id: Uuid,
path: []const u8,
filename: []const u8,
owner: FileOwner,
owner_id: Uuid,
size: usize,
description: []const u8,
content_type: []const u8,
filename: []const u8,
description: ?[]const u8,
content_type: ?[]const u8,
sensitive: bool,
status: FileStatus,
created_at: DateTime,
updated_at: DateTime,
};
const EntryType = enum {
dir,
file,
};
pub const CreateFileArgs = struct {
dir: []const u8,
pub const FileMeta = struct {
filename: []const u8,
owner: FileOwner,
created_by: Uuid,
description: ?[]const u8,
content_type: ?[]const u8,
sensitive: bool,
};
fn lookupDirectory(db: anytype, owner: FileOwner, path: []const u8, alloc: std.mem.Allocator) !Uuid {
return (try db.queryRow(
std.meta.Tuple(
&.{util.Uuid},
),
\\SELECT id
\\FROM drive_entry_path
\\WHERE
\\ path = (CASE WHEN LENGTH($1) = 0 THEN '/' ELSE '/' || $1 || '/' END)
\\ AND account_owner_id IS NOT DISTINCT FROM $2
\\ AND community_owner_id IS NOT DISTINCT FROM $3
\\ AND kind = 'dir'
pub fn get(db: anytype, id: Uuid, alloc: std.mem.Allocator) !FileUpload {
return try db.queryRow(
FileUpload,
\\SELECT
\\ id,
\\ owner_id,
\\ size,
\\ filename,
\\ description,
\\ content_type,
\\ sensitive,
\\ status,
\\ created_at,
\\ updated_at
\\FROM file_upload
\\WHERE id = $1
\\LIMIT 1
,
.{
std.mem.trim(u8, path, "/"),
if (owner == .user_id) owner.user_id else null,
if (owner == .community_id) owner.community_id else null,
},
.{id},
alloc,
))[0];
);
}
fn lookup(db: anytype, owner: FileOwner, path: []const u8, alloc: std.mem.Allocator) !Uuid {
return (try db.queryRow(
std.meta.Tuple(
&.{util.Uuid},
),
\\SELECT id
\\FROM drive_entry_path
\\WHERE
\\ path = (CASE WHEN LENGTH($1) = 0 THEN '/' ELSE '/' || $1 || '/' END)
\\ AND account_owner_id IS NOT DISTINCT FROM $2
\\ AND community_owner_id IS NOT DISTINCT FROM $3
\\LIMIT 1
,
.{
std.mem.trim(u8, path, "/"),
if (owner == .user_id) owner.user_id else null,
if (owner == .community_id) owner.community_id else null,
},
alloc,
))[0];
pub const PartialMeta = Partial(FileMeta);
pub fn Partial(comptime T: type) type {
const t_fields = std.meta.fields(T);
var fields: [t_fields.len]std.builtin.Type.StructField = undefined;
for (std.meta.fields(T)) |f, i| fields[i] = .{
.name = f.name,
.field_type = ?f.field_type,
.default_value = &@as(?f.field_type, null),
.is_comptime = false,
.alignment = @alignOf(?f.field_type),
};
return @Type(.{ .Struct = .{
.layout = .Auto,
.fields = &fields,
.decls = &.{},
.is_tuple = false,
} });
}
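// Illustrative expansion (hypothetical struct, not in the diff): given
//   const Meta = struct { filename: []const u8, sensitive: bool };
// Partial(Meta) is equivalent to
//   struct { filename: ?[]const u8 = null, sensitive: ?bool = null };
// so callers pass only the fields they want to change.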
pub fn mkdir(db: anytype, owner: FileOwner, path: []const u8, alloc: std.mem.Allocator) !void {
var split = std.mem.splitBackwards(u8, std.mem.trim(u8, path, "/"), "/");
const name = split.first();
const dir = split.rest();
std.log.debug("'{s}' / '{s}'", .{ name, dir });
pub fn update(db: anytype, id: Uuid, meta: PartialMeta, alloc: std.mem.Allocator) !void {
var builder = sql.QueryBuilder.init(alloc);
defer builder.deinit();
if (name.len == 0) return error.EmptyName;
try builder.appendSlice("UPDATE file_upload");
const id = Uuid.randV4(util.getThreadPrng());
if (meta.filename) |_| try builder.set("filename", "$2");
if (meta.description) |_| try builder.set("description", "$3");
if (meta.content_type) |_| try builder.set("content_type", "$4");
if (meta.sensitive) |_| try builder.set("sensitive", "$5");
const tx = try db.begin();
errdefer tx.rollback();
if (builder.set_statements_appended == 0) return error.NoChange;
const parent = try lookupDirectory(tx, owner, dir, alloc);
try builder.andWhere("id = $1");
try tx.insert("drive_entry", .{
.id = id,
std.log.debug("{any}", .{meta});
.account_owner_id = if (owner == .user_id) owner.user_id else null,
.community_owner_id = if (owner == .community_id) owner.community_id else null,
.name = name,
.parent_directory_id = parent,
}, alloc);
try tx.commit();
}
pub fn rmdir(db: anytype, owner: FileOwner, path: []const u8, alloc: std.mem.Allocator) !void {
const tx = try db.begin();
errdefer tx.rollback();
const id = try lookupDirectory(tx, owner, path, alloc);
try tx.exec("DELETE FROM drive_directory WHERE id = $1", .{id}, alloc);
try tx.commit();
}
fn insertFileRow(tx: anytype, id: Uuid, filename: []const u8, owner: FileOwner, dir: Uuid, alloc: std.mem.Allocator) !void {
try tx.insert("drive_entry", .{
.id = id,
.account_owner_id = if (owner == .user_id) owner.user_id else null,
.community_owner_id = if (owner == .community_id) owner.community_id else null,
.parent_directory_id = dir,
.name = filename,
.file_id = id,
try db.exec(try builder.terminate(), .{
id,
meta.filename orelse null,
meta.description orelse null,
meta.content_type orelse null,
meta.sensitive orelse null,
}, alloc);
}
pub fn createFile(db: anytype, args: CreateFileArgs, data: []const u8, alloc: std.mem.Allocator) !void {
pub fn create(db: anytype, owner_id: Uuid, meta: FileMeta, data: []const u8, alloc: std.mem.Allocator) !Uuid {
const id = Uuid.randV4(util.getThreadPrng());
const now = DateTime.now();
try db.insert("file_upload", .{
.id = id,
{
var tx = try db.begin();
errdefer tx.rollback();
.owner_id = owner_id,
.size = data.len,
const dir_id = try lookupDirectory(tx, args.owner, args.dir, alloc);
.filename = meta.filename,
.description = meta.description,
.content_type = meta.content_type,
.sensitive = meta.sensitive,
try tx.insert("file_upload", .{
.id = id,
.status = FileStatus.uploading,
.filename = args.filename,
.created_at = now,
.updated_at = now,
}, alloc);
.created_by = args.created_by,
.size = data.len,
.description = args.description,
.content_type = args.content_type,
.sensitive = args.sensitive,
.is_deleted = false,
.created_at = now,
.updated_at = now,
}, alloc);
var sub_tx = try tx.savepoint();
if (insertFileRow(sub_tx, id, args.filename, args.owner, dir_id, alloc)) |_| {
try sub_tx.release();
} else |err| {
std.log.debug("{}", .{err});
switch (err) {
error.UniqueViolation => {
try sub_tx.rollbackSavepoint();
// Rename the file before trying again
var split = std.mem.split(u8, args.filename, ".");
const name = split.first();
const ext = split.rest();
var buf: [256]u8 = undefined;
const drive_filename = try std.fmt.bufPrint(&buf, "{s}.{}.{s}", .{ name, id, ext });
try insertFileRow(tx, id, drive_filename, args.owner, dir_id, alloc);
},
else => return error.DatabaseFailure,
}
}
try tx.commit();
}
errdefer {
db.exec("DELETE FROM file_upload WHERE ID = $1", .{id}, alloc) catch |err| {
std.log.err("Unable to remove file record in DB: {}", .{err});
saveFile(id, data) catch |err| {
db.exec("DELETE FROM file_upload WHERE ID = $1", .{id}, alloc) catch |e| {
std.log.err("Unable to remove file {} record in DB: {}", .{ id, e });
};
db.exec("DELETE FROM drive_entry WHERE ID = $1", .{id}, alloc) catch |err| {
std.log.err("Unable to remove file record in DB: {}", .{err});
};
}
return err;
};
try saveFile(id, data);
try db.exec(
\\UPDATE file_upload
\\SET status = 'uploaded'
\\WHERE id = $1
, .{id}, alloc);
return id;
}
pub fn delete(db: anytype, id: Uuid, alloc: std.mem.Allocator) !void {
var dir = try std.fs.cwd().openDir(data_root, .{});
defer dir.close();
try dir.deleteFile(&id.toCharArray());
try db.exec(
\\DELETE FROM file_upload
\\WHERE id = $1
, .{id}, alloc);
}
const data_root = "./files";
@@ -218,17 +171,3 @@ pub fn deref(alloc: std.mem.Allocator, id: Uuid) ![]const u8 {
return dir.readFileAlloc(alloc, &id.toCharArray(), 1 << 32);
}
pub fn deleteFile(db: anytype, alloc: std.mem.Allocator, id: Uuid) !void {
var dir = try std.fs.cwd().openDir(data_root, .{});
defer dir.close();
try dir.deleteFile(id.toCharArray());
const tx = try db.beginOrSavepoint();
errdefer tx.rollback();
tx.exec("DELETE FROM drive_entry WHERE ID = $1", .{id}, alloc) catch return error.DatabaseFailure;
tx.exec("DELETE FROM file_upload WHERE ID = $1", .{id}, alloc) catch return error.DatabaseFailure;
try tx.commitOrRelease();
}

@@ -10,10 +10,10 @@ const Token = std.json.Token;
const unescapeValidString = std.json.unescapeValidString;
const UnescapeValidStringError = std.json.UnescapeValidStringError;
pub fn parse(comptime T: type, body: []const u8, alloc: std.mem.Allocator) !T {
pub fn parse(comptime T: type, allow_unknown_fields: bool, body: []const u8, alloc: std.mem.Allocator) !T {
var tokens = TokenStream.init(body);
const options = ParseOptions{ .allocator = alloc };
const options = ParseOptions{ .allocator = alloc, .ignore_unknown_fields = !allow_unknown_fields };
const token = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
const r = try parseInternal(T, token, &tokens, options);

@@ -672,7 +672,13 @@ const BaseContentType = enum {
other,
};
fn parseBodyFromRequest(comptime T: type, content_type: ?[]const u8, reader: anytype, alloc: std.mem.Allocator) !T {
fn parseBodyFromRequest(
comptime T: type,
comptime options: ParseBodyOptions,
content_type: ?[]const u8,
reader: anytype,
alloc: std.mem.Allocator,
) !T {
// Default to JSON for now, for testing purposes
const eff_type = content_type orelse "application/json";
const parser_type = matchContentType(eff_type);
@@ -681,7 +687,7 @@ fn parseBodyFromRequest(comptime T: type, content_type: ?[]const u8, reader: any
.octet_stream, .json => {
const buf = try reader.readAllAlloc(alloc, 1 << 16);
defer alloc.free(buf);
const body = try json_utils.parse(T, buf, alloc);
const body = try json_utils.parse(T, options.allow_unknown_fields, buf, alloc);
defer json_utils.parseFree(body, alloc);
return try util.deepClone(alloc, body);
@@ -689,14 +695,14 @@ fn parseBodyFromRequest(comptime T: type, content_type: ?[]const u8, reader: any
.url_encoded => {
const buf = try reader.readAllAlloc(alloc, 1 << 16);
defer alloc.free(buf);
return urlencode.parse(alloc, T, buf) catch |err| switch (err) {
return urlencode.parse(alloc, options.allow_unknown_fields, T, buf) catch |err| switch (err) {
//error.NoQuery => error.NoBody,
else => err,
};
},
.multipart_formdata => {
const boundary = fields.getParam(eff_type, "boundary") orelse return error.MissingBoundary;
return try @import("./multipart.zig").parseFormData(T, boundary, reader, alloc);
return try @import("./multipart.zig").parseFormData(T, options.allow_unknown_fields, boundary, reader, alloc);
},
else => return error.UnsupportedMediaType,
}
@@ -714,12 +720,16 @@ fn matchContentType(hdr: []const u8) BaseContentType {
return .other;
}
pub const ParseBodyOptions = struct {
allow_unknown_fields: bool = false,
};
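// Dispatch sketch (summarizing the switch above): "application/json" and
// "application/octet-stream" bodies go through json_utils.parse,
// "application/x-www-form-urlencoded" through urlencode.parse, and
// "multipart/form-data; boundary=..." through multipart.parseFormData.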
/// Parses a set of body arguments from the request body based on the request's Content-Type
/// header.
///
/// The exact method for parsing depends partially on the Content-Type; JSON types are preferred.
/// TODO: Need tests for this, including various Content-Type values
pub fn ParseBody(comptime Body: type) type {
pub fn ParseBody(comptime Body: type, comptime options: ParseBodyOptions) type {
return struct {
pub fn handle(_: @This(), req: anytype, res: anytype, ctx: anytype, next: anytype) !void {
const content_type = req.headers.get("Content-Type");
@@ -731,7 +741,7 @@ pub fn ParseBody(comptime Body: type) type {
}
var stream = req.body orelse return error.NoBody;
const body = try parseBodyFromRequest(Body, content_type, stream.reader(), ctx.allocator);
const body = try parseBodyFromRequest(Body, options, content_type, stream.reader(), ctx.allocator);
defer util.deepFree(ctx.allocator, body);
return next.handle(
@@ -751,7 +761,7 @@ test "parseBodyFromRequest" {
const testCase = struct {
fn case(content_type: []const u8, body: []const u8, expected: anytype) !void {
var stream = std.io.StreamSource{ .const_buffer = std.io.fixedBufferStream(body) };
const result = try parseBodyFromRequest(@TypeOf(expected), content_type, stream.reader(), std.testing.allocator);
const result = try parseBodyFromRequest(@TypeOf(expected), .{}, content_type, stream.reader(), std.testing.allocator);
defer util.deepFree(std.testing.allocator, result);
try util.testing.expectDeepEqual(expected, result);
@@ -797,7 +807,7 @@ pub fn ParseQueryParams(comptime QueryParams: type) type {
return struct {
pub fn handle(_: @This(), req: anytype, res: anytype, ctx: anytype, next: anytype) !void {
if (QueryParams == void) return next.handle(req, res, addField(ctx, "query_params", {}), {});
const query = try urlencode.parse(ctx.allocator, QueryParams, ctx.query_string);
const query = try urlencode.parse(ctx.allocator, true, QueryParams, ctx.query_string);
defer util.deepFree(ctx.allocator, query);
return next.handle(

@@ -182,7 +182,7 @@ fn Deserializer(comptime Result: type) type {
});
}
pub fn parseFormData(comptime T: type, boundary: []const u8, reader: anytype, alloc: std.mem.Allocator) !T {
pub fn parseFormData(comptime T: type, allow_unknown_fields: bool, boundary: []const u8, reader: anytype, alloc: std.mem.Allocator) !T {
var form = openForm(try openMultipart(boundary, reader));
var ds = Deserializer(T){};
@@ -196,7 +196,13 @@ pub fn parseFormData(comptime T: type, boundary: []const u8, reader: anytype, al
var part = (try form.next(alloc)) orelse break;
errdefer util.deepFree(alloc, part);
try ds.setSerializedField(part.name, part);
ds.setSerializedField(part.name, part) catch |err| switch (err) {
error.UnknownField => if (allow_unknown_fields) {
util.deepFree(alloc, part);
continue;
} else return err,
else => |e| return e,
};
}
return try ds.finish(alloc);

@@ -98,13 +98,17 @@ pub const Iter = struct {
/// Would be used to parse a query string like
/// `?foo.baz=12345`
///
pub fn parse(alloc: std.mem.Allocator, comptime T: type, query: []const u8) !T {
pub fn parse(alloc: std.mem.Allocator, allow_unknown_fields: bool, comptime T: type, query: []const u8) !T {
var iter = Iter.from(query);
var deserializer = Deserializer(T){};
while (iter.next()) |pair| {
try deserializer.setSerializedField(pair.key, pair.value);
deserializer.setSerializedField(pair.key, pair.value) catch |err| switch (err) {
error.UnknownField => if (allow_unknown_fields) continue else return err,
else => |e| return e,
};
}
return try deserializer.finish(alloc);
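// Hypothetical call sites for the new flag (names illustrative):
//   const Params = struct { foo: struct { baz: usize } };
//   _ = try parse(alloc, false, Params, "foo.baz=12345");        // strict: unknown keys error
//   _ = try parse(alloc, true, Params, "foo.baz=12345&extra=1"); // lenient: "extra" is skipped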

@@ -73,6 +73,13 @@ pub fn EndpointRequest(comptime Endpoint: type) type {
const Body = if (@hasDecl(Endpoint, "Body")) Endpoint.Body else void;
const Query = if (@hasDecl(Endpoint, "Query")) Endpoint.Query else void;
const body_options = .{
.allow_unknown_fields = if (@hasDecl(Endpoint, "allow_unknown_fields_in_body"))
Endpoint.allow_unknown_fields_in_body
else
false,
};
allocator: std.mem.Allocator,
method: http.Method,
@@ -91,7 +98,7 @@ pub fn EndpointRequest(comptime Endpoint: type) type {
const body_middleware = //if (Body == void)
//mdw.injectContext(.{ .body = {} })
//else
mdw.ParseBody(Body){};
mdw.ParseBody(Body, body_options){};
const query_middleware = //if (Query == void)
//mdw.injectContext(.{ .query_params = {} })

@@ -29,4 +29,8 @@ pub const routes = .{
controllers.apiEndpoint(follows.query_following),
controllers.apiEndpoint(drive.upload),
controllers.apiEndpoint(drive.mkdir),
controllers.apiEndpoint(drive.get),
controllers.apiEndpoint(drive.delete),
controllers.apiEndpoint(drive.move),
controllers.apiEndpoint(drive.update),
};

@@ -11,46 +11,15 @@ pub const DriveArgs = struct {
path: []const u8,
};
pub const query = struct {
pub const get = struct {
pub const method = .GET;
pub const path = drive_path;
pub const Args = DriveArgs;
pub const Query = struct {
const OrderBy = enum {
created_at,
filename,
};
max_items: usize = 20,
like: ?[]const u8 = null,
order_by: OrderBy = .created_at,
direction: api.Direction = .descending,
prev: ?struct {
id: Uuid,
order_val: union(OrderBy) {
created_at: DateTime,
filename: []const u8,
},
} = null,
page_direction: api.PageDirection = .forward,
};
pub fn handler(req: anytype, res: anytype, srv: anytype) !void {
const result = srv.driveQuery(req.args.path, req.query) catch |err| switch (err) {
error.NotADirectory => {
const meta = try srv.getFile(path);
try res.json(.ok, meta);
return;
},
else => |e| return e,
};
const result = try srv.driveGet(req.args.path);
try controller_utils.paginate(result, res, req.allocator);
try res.json(.ok, result);
}
};
@@ -67,7 +36,7 @@ pub const upload = struct {
pub fn handler(req: anytype, res: anytype, srv: anytype) !void {
const f = req.body.file;
try srv.uploadFile(.{
try srv.driveUpload(.{
.dir = req.args.path,
.filename = f.filename,
.description = req.body.description,
@@ -86,11 +55,7 @@ pub const delete = struct {
pub const Args = DriveArgs;
pub fn handler(req: anytype, res: anytype, srv: anytype) !void {
const info = try srv.driveLookup(req.args.path);
if (info == .dir)
try srv.driveRmdir(req.args.path)
else if (info == .file)
try srv.deleteFile(req.args.path);
try srv.driveDelete(req.args.path);
return res.json(.ok, .{});
}
@@ -113,18 +78,23 @@ pub const update = struct {
pub const path = drive_path;
pub const Args = DriveArgs;
// TODO: Validate that unhandled fields are equivalent to ones in the object
pub const allow_unknown_fields_in_body = true;
pub const Body = struct {
filename: ?[]const u8 = null,
description: ?[]const u8 = null,
content_type: ?[]const u8 = null,
sensitive: ?bool = null,
};
pub fn handler(req: anytype, res: anytype, srv: anytype) !void {
const info = try srv.driveLookup(req.args.path);
if (info != .file) return error.NotFile;
const new_info = try srv.updateFile(path, req.body);
try res.json(.ok, new_info);
try srv.driveUpdate(req.args.path, .{
.filename = req.body.filename,
.description = req.body.description,
.content_type = req.body.content_type,
.sensitive = req.body.sensitive,
});
try res.json(.ok, .{});
}
};
@@ -134,11 +104,11 @@ pub const move = struct {
pub const Args = DriveArgs;
pub fn handler(req: anytype, res: anytype, srv: anytype) !void {
const destination = req.fields.get("Destination") orelse return error.NoDestination;
const destination = req.headers.get("Destination") orelse return error.NoDestination;
try srv.driveMove(req.args.path, destination);
try res.fields.put("Location", destination);
try srv.json(.created, .{});
try res.headers.put("Location", destination);
try res.json(.created, .{});
}
};

@@ -1,4 +1,5 @@
const std = @import("std");
const util = @import("util");
const controllers = @import("../controllers.zig");
pub const routes = .{
@@ -7,6 +8,7 @@ pub const routes = .{
controllers.apiEndpoint(login),
controllers.apiEndpoint(global_timeline),
controllers.apiEndpoint(cluster.overview),
controllers.apiEndpoint(media),
};
const index = struct {
@@ -87,3 +89,24 @@ const cluster = struct {
}
};
};
const media = struct {
pub const path = "/media/:id";
pub const method = .GET;
pub const Args = struct {
id: util.Uuid,
};
pub fn handler(req: anytype, res: anytype, srv: anytype) !void {
const result = try srv.fileDereference(req.args.id);
defer util.deepFree(srv.allocator, result);
try res.headers.put("Content-Type", result.meta.content_type orelse "application/octet-stream");
var stream = try res.open(.ok);
defer stream.close();
try stream.writer().writeAll(result.data);
try stream.finish();
}
};
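// Response sketch (illustrative id and type): GET /media/<uuid> returns the raw
// bytes stored under ./files/<id>, with Content-Type taken from file_upload or
// falling back to "application/octet-stream" when none was recorded.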

@@ -70,8 +70,9 @@ const create_migration_table =
\\);
;
// NOTE: Until the first public release, i may collapse multiple
// migrations into a single one. this will require db recreation
// NOTE: I may still change these until the v0.1 release. After that, I'll guarantee that you
// can upgrade to any v0.x release by just running unapplied migrations in order. You might
// need extra work to upgrade to v1.0, but you shouldn't have to recreate the db.
const migrations: []const Migration = &.{
.{
.name = "accounts and actors",
@@ -212,7 +213,7 @@ const migrations: []const Migration = &.{
\\CREATE TABLE file_upload(
\\ id UUID NOT NULL PRIMARY KEY,
\\
\\ created_by UUID REFERENCES account(id),
\\ owner_id UUID REFERENCES actor(id),
\\ size INTEGER NOT NULL,
\\
\\ filename TEXT NOT NULL,
@@ -220,17 +221,16 @@ const migrations: []const Migration = &.{
\\ content_type TEXT,
\\ sensitive BOOLEAN NOT NULL,
\\
\\ is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
\\ status TEXT NOT NULL,
\\
\\ created_at TIMESTAMPTZ NOT NULL,
\\ updated_at TIMESTAMPTZ NOT NULL
\\ created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
\\ updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
\\);
\\
\\CREATE TABLE drive_entry(
\\ id UUID NOT NULL PRIMARY KEY,
\\
\\ account_owner_id UUID REFERENCES account(id),
\\ community_owner_id UUID REFERENCES community(id),
\\ owner_id UUID REFERENCES actor(id),
\\
\\ name TEXT,
\\ parent_directory_id UUID REFERENCES drive_entry(id),
@@ -238,10 +238,6 @@ const migrations: []const Migration = &.{
\\ file_id UUID REFERENCES file_upload(id),
\\
\\ CHECK(
\\ (account_owner_id IS NULL AND community_owner_id IS NOT NULL)
\\ OR (account_owner_id IS NOT NULL AND community_owner_id IS NULL)
\\ ),
\\ CHECK(
\\ (name IS NULL AND parent_directory_id IS NULL AND file_id IS NULL)
\\ OR (name IS NOT NULL AND parent_directory_id IS NOT NULL)
\\ )
@@ -250,7 +246,7 @@ const migrations: []const Migration = &.{
\\ON drive_entry(
\\ name,
\\ COALESCE(parent_directory_id, ''),
\\ COALESCE(account_owner_id, community_owner_id)
\\ owner_id
\\);
,
.down =
@@ -265,44 +261,41 @@ const migrations: []const Migration = &.{
\\CREATE VIEW drive_entry_path(
\\ id,
\\ path,
\\ account_owner_id,
\\ community_owner_id,
\\ owner_id,
\\ name,
\\ parent_directory_id,
\\ file_id,
\\ kind
\\) AS WITH RECURSIVE full_path(
\\ id,
\\ path,
\\ account_owner_id,
\\ community_owner_id,
\\ kind
\\ owner_id
\\) AS (
\\ SELECT
\\ id,
\\ '' AS path,
\\ account_owner_id,
\\ community_owner_id,
\\ 'dir' AS kind
\\ owner_id
\\ FROM drive_entry
\\ WHERE parent_directory_id IS NULL
\\ UNION ALL
\\ SELECT
\\ base.id,
\\ (dir.path || '/' || base.name) AS path,
\\ base.account_owner_id,
\\ base.community_owner_id,
\\ (CASE WHEN base.file_id IS NULL THEN 'dir' ELSE 'file' END) as kind
\\ base.owner_id
\\ FROM drive_entry AS base
\\ JOIN full_path AS dir ON
\\ base.parent_directory_id = dir.id
\\ AND base.account_owner_id IS NOT DISTINCT FROM dir.account_owner_id
\\ AND base.community_owner_id IS NOT DISTINCT FROM dir.community_owner_id
\\ AND base.owner_id = dir.owner_id
\\)
\\SELECT
\\ id,
\\ (CASE WHEN kind = 'dir' THEN path || '/' ELSE path END) AS path,
\\ account_owner_id,
\\ community_owner_id,
\\ kind
\\FROM full_path;
\\ full_path.id,
\\ (CASE WHEN LENGTH(full_path.path) = 0 THEN '/' ELSE full_path.path END) AS path,
\\ full_path.owner_id,
\\ drive_entry.name,
\\ drive_entry.parent_directory_id,
\\ drive_entry.file_id,
\\ (CASE WHEN drive_entry.file_id IS NULL THEN 'dir' ELSE 'file' END) as kind
\\FROM full_path JOIN drive_entry ON full_path.id = drive_entry.id;
,
.down =
\\DROP VIEW drive_entry_path;
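// Illustrative rows from the rewritten view (hypothetical data):
//   path          | kind | name    | parent_directory_id
//   '/'           | dir  | NULL    | NULL        (each owner's root row)
//   '/docs'       | dir  | 'docs'  | <root id>
//   '/docs/a.png' | file | 'a.png' | <docs id>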
@@ -313,34 +306,40 @@ const migrations: []const Migration = &.{
.up =
\\INSERT INTO drive_entry(
\\ id,
\\ account_owner_id,
\\ community_owner_id,
\\ owner_id,
\\ parent_directory_id,
\\ name,
\\ file_id
\\) SELECT
\\ id,
\\ id AS account_owner_id,
\\ NULL AS community_owner_id,
\\ id AS owner_id,
\\ NULL AS parent_directory_id,
\\ NULL AS name,
\\ NULL AS file_id
\\FROM account;
\\INSERT INTO drive_entry(
\\FROM actor;
,
.down = "",
},
.{
.name = "community actors",
.up = "ALTER TABLE community ADD COLUMN community_actor_id UUID REFERENCES actor(id)",
.down = "ALTER COLUMN community DROP COLUMN community_actor_id",
},
.{
.name = "create community actors",
.up =
\\INSERT INTO actor(
\\ id,
\\ account_owner_id,
\\ community_owner_id,
\\ parent_directory_id,
\\ name,
\\ file_id
\\ username,
\\ community_id,
\\ created_at
\\) SELECT
\\ id,
\\ NULL AS account_owner_id,
\\ id AS community_owner_id,
\\ NULL AS parent_directory_id,
\\ NULL AS name,
\\ NULL AS file_id
\\ host AS username,
\\ id AS community_id,
\\ CURRENT_TIMESTAMP AS created_at
\\FROM community;
\\UPDATE community SET community_actor_id = id;
,
.down = "",
},

@@ -124,6 +124,7 @@ pub fn parseValueNotNull(alloc: ?Allocator, comptime T: type, str: []const u8) !
return error.ResultTypeMismatch;
},
.Optional => try parseValueNotNull(alloc, std.meta.Child(T), str),
.Bool => return util.serialize.bool_map.get(str) orelse return error.ResultTypeMismatch,
else => @compileError("Type " ++ @typeName(T) ++ " not supported"),
},

@@ -341,6 +341,7 @@ fn getColumnInt(stmt: *c.sqlite3_stmt, comptime T: type, idx: u15) common.GetErr
std.log.err("SQLite column {}: Expected value of type {}, got {} (outside of range)", .{ idx, T, val });
return error.ResultTypeMismatch;
},
.Bool => if (val == 0) return false else return true,
else => {
std.log.err("SQLite column {}: Storage class INT cannot be parsed into type {}", .{ idx, T });
return error.ResultTypeMismatch;

@@ -24,6 +24,8 @@ pub const QueryRowError = errors.QueryRowError;
pub const BeginError = errors.BeginError;
pub const CommitError = errors.CommitError;
pub const DatabaseError = QueryError || RowError || QueryRowError || BeginError || CommitError;
pub const QueryOptions = common.QueryOptions;
pub const Engine = enum {
@@ -37,6 +39,7 @@ pub const Engine = enum {
pub const QueryBuilder = struct {
array: std.ArrayList(u8),
where_clauses_appended: usize = 0,
set_statements_appended: usize = 0,
pub fn init(alloc: std.mem.Allocator) QueryBuilder {
return QueryBuilder{ .array = std.ArrayList(u8).init(alloc) };
@@ -58,7 +61,7 @@ pub const QueryBuilder = struct {
/// interspersed with calls to appendSlice
pub fn andWhere(self: *QueryBuilder, comptime clause: []const u8) !void {
if (self.where_clauses_appended == 0) {
try self.array.appendSlice("WHERE ");
try self.array.appendSlice("\nWHERE ");
} else {
try self.array.appendSlice(" AND ");
}
@@ -67,6 +70,17 @@
self.where_clauses_appended += 1;
}
pub fn set(self: *QueryBuilder, comptime col: []const u8, comptime val: []const u8) !void {
if (self.set_statements_appended == 0) {
try self.array.appendSlice("\nSET ");
} else {
try self.array.appendSlice(", ");
}
try self.array.appendSlice(col ++ " = " ++ val);
self.set_statements_appended += 1;
}
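// Composition sketch (hypothetical usage, matching services.files.update):
//   try builder.appendSlice("UPDATE file_upload");
//   try builder.set("filename", "$2");  // first set() emits "\nSET filename = $2"
//   try builder.set("sensitive", "$5"); // later set()s emit ", sensitive = $5"
//   try builder.andWhere("id = $1");    // first andWhere() emits "\nWHERE id = $1"
// builder.str() then reads:
//   UPDATE file_upload
//   SET filename = $2, sensitive = $5
//   WHERE id = $1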
pub fn str(self: *const QueryBuilder) []const u8 {
return self.array.items;
}
@@ -523,6 +537,17 @@ fn Tx(comptime tx_level: u8) type {
return row;
}
pub fn queryRows(
self: Self,
comptime RowType: type,
q: [:0]const u8,
args: anytype,
max_items: ?usize,
alloc: std.mem.Allocator,
) QueryRowError![]RowType {
return try self.queryRowsWithOptions(RowType, q, args, max_items, .{ .allocator = alloc });
}
// Runs a query to completion and returns the results as a slice
pub fn queryRowsWithOptions(
self: Self,

@@ -242,7 +242,7 @@ pub fn DeserializerContext(comptime Result: type, comptime From: type, comptime
};
}
const bool_map = std.ComptimeStringMap(bool, .{
pub const bool_map = std.ComptimeStringMap(bool, .{
.{ "true", true },
.{ "t", true },
.{ "yes", true },