diff --git a/src/api/lib.zig b/src/api/lib.zig index 58602ba..065fdc7 100644 --- a/src/api/lib.zig +++ b/src/api/lib.zig @@ -9,8 +9,7 @@ const services = struct { const communities = @import("./services/communities.zig"); const actors = @import("./services/actors.zig"); const auth = @import("./services/auth.zig"); - const drive = @import("./services/drive.zig"); - const files = @import("./services/files.zig"); + const drive = @import("./services/files.zig"); const invites = @import("./services/invites.zig"); const notes = @import("./services/notes.zig"); const follows = @import("./services/follows.zig"); @@ -140,30 +139,12 @@ pub const FollowingQueryResult = FollowQueryResult; pub const UploadFileArgs = struct { filename: []const u8, - dir: []const u8, + dir: ?[]const u8, description: ?[]const u8, content_type: []const u8, sensitive: bool, }; -pub const DriveEntry = services.drive.DriveEntry; -pub const FileUpload = services.files.FileUpload; -pub const DriveGetResult = union(services.drive.Kind) { - dir: struct { - entry: DriveEntry, - children: []DriveEntry, - }, - file: struct { - entry: DriveEntry, - file: FileUpload, - }, -}; - -pub const FileResult = struct { - meta: services.files.FileUpload, - data: []const u8, -}; - pub fn isAdminSetup(db: sql.Db) !bool { _ = services.communities.adminCommunityId(db) catch |err| switch (err) { error.NotFound => return false, @@ -538,98 +519,22 @@ fn ApiConn(comptime DbConn: type) type { ); } - pub fn driveUpload(self: *Self, meta: UploadFileArgs, body: []const u8) !void { + pub fn uploadFile(self: *Self, meta: UploadFileArgs, body: []const u8) !void { const user_id = self.user_id orelse return error.NoToken; - const file_id = try services.files.create(self.db, user_id, .{ + return try services.drive.createFile(self.db, .{ + .dir = meta.dir orelse "/", .filename = meta.filename, + .owner = .{ .user_id = user_id }, + .created_by = user_id, .description = meta.description, .content_type = meta.content_type, .sensitive = 
meta.sensitive, }, body, self.allocator); - - errdefer services.files.delete(self.db, file_id, self.allocator) catch |err| { - std.log.err("Unable to delete file {}: {}", .{ file_id, err }); - }; - - services.drive.create(self.db, user_id, meta.dir, meta.filename, file_id, self.allocator) catch |err| switch (err) { - error.PathAlreadyExists => { - var buf: [256]u8 = undefined; - var split = std.mem.splitBackwards(u8, meta.filename, "."); - const ext = split.first(); - const name = split.rest(); - const new_name = try std.fmt.bufPrint(&buf, "{s}.{s}.{s}", .{ name, file_id, ext }); - - try services.drive.create( - self.db, - user_id, - meta.dir, - new_name, - file_id, - self.allocator, - ); - }, - else => |e| return e, - }; } pub fn driveMkdir(self: *Self, path: []const u8) !void { const user_id = self.user_id orelse return error.NoToken; - var split = std.mem.splitBackwards(u8, path, "/"); - std.log.debug("{s}", .{path}); - const base = split.first(); - const dir = split.rest(); - try services.drive.create(self.db, user_id, dir, base, null, self.allocator); - } - - pub fn driveDelete(self: *Self, path: []const u8) !void { - const user_id = self.user_id orelse return error.NoToken; - const entry = try services.drive.stat(self.db, user_id, path, self.allocator); - defer util.deepFree(self.allocator, entry); - try services.drive.delete(self.db, entry.id, self.allocator); - if (entry.file_id) |file_id| try services.files.delete(self.db, file_id, self.allocator); - } - - pub fn driveMove(self: *Self, src: []const u8, dest: []const u8) !void { - const user_id = self.user_id orelse return error.NoToken; - try services.drive.move(self.db, user_id, src, dest, self.allocator); - } - - pub fn driveGet(self: *Self, path: []const u8) !DriveGetResult { - const user_id = self.user_id orelse return error.NoToken; - const entry = try services.drive.stat(self.db, user_id, path, self.allocator); - errdefer util.deepFree(self.allocator, entry); - - if (entry.file_id) |file_id| return 
.{ - .file = .{ - .entry = entry, - .file = try services.files.get(self.db, file_id, self.allocator), - }, - } else return .{ - .dir = .{ - .entry = entry, - .children = try services.drive.list(self.db, entry.id, self.allocator), - }, - }; - } - - pub fn driveUpdate(self: *Self, path: []const u8, meta: services.files.PartialMeta) !void { - const user_id = self.user_id orelse return error.NoToken; - std.log.debug("{s}", .{path}); - const entry = try services.drive.stat(self.db, user_id, path, self.allocator); - errdefer util.deepFree(self.allocator, entry); - - std.log.debug("{}", .{entry.id}); - try services.files.update(self.db, entry.file_id orelse return error.NotAFile, meta, self.allocator); - } - - pub fn fileDereference(self: *Self, id: Uuid) !FileResult { - const meta = try services.files.get(self.db, id, self.allocator); - errdefer util.deepFree(self.allocator, meta); - - return FileResult{ - .meta = meta, - .data = try services.files.deref(self.allocator, id), - }; + try services.drive.mkdir(self.db, .{ .user_id = user_id }, path, self.allocator); } }; } diff --git a/src/api/services/auth.zig b/src/api/services/auth.zig index 426c734..03a5feb 100644 --- a/src/api/services/auth.zig +++ b/src/api/services/auth.zig @@ -55,10 +55,6 @@ pub fn register( .hash = hash, .changed_at = DateTime.now(), }, alloc) catch return error.DatabaseFailure; - tx.insert("drive_entry", .{ - .id = id, - .owner_id = id, - }, alloc) catch return error.DatabaseFailure; tx.commitOrRelease() catch return error.DatabaseFailure; diff --git a/src/api/services/communities.zig b/src/api/services/communities.zig index 824957e..780b7d5 100644 --- a/src/api/services/communities.zig +++ b/src/api/services/communities.zig @@ -3,7 +3,6 @@ const builtin = @import("builtin"); const util = @import("util"); const sql = @import("sql"); const common = @import("./common.zig"); -const actors = @import("./actors.zig"); const Uuid = util.Uuid; const DateTime = util.DateTime; @@ -39,10 +38,11 @@ pub const 
CreateOptions = struct { }; pub const CreateError = error{ + DatabaseFailure, UnsupportedScheme, InvalidOrigin, CommunityExists, -} || sql.DatabaseError; +}; pub fn create(db: anytype, origin: []const u8, options: CreateOptions, alloc: std.mem.Allocator) CreateError!Uuid { const scheme_len = std.mem.indexOfScalar(u8, origin, ':') orelse return error.InvalidOrigin; @@ -71,9 +71,7 @@ pub fn create(db: anytype, origin: []const u8, options: CreateOptions, alloc: st const id = Uuid.randV4(util.getThreadPrng()); // TODO: wrap this in TX - var tx = try db.beginOrSavepoint(); - errdefer tx.rollback(); - if (tx.queryRow( + if (db.queryRow( std.meta.Tuple(&.{Uuid}), "SELECT id FROM community WHERE host = $1", .{host}, @@ -82,11 +80,11 @@ pub fn create(db: anytype, origin: []const u8, options: CreateOptions, alloc: st return error.CommunityExists; } else |err| switch (err) { error.NoRows => {}, - else => |e| return e, + else => return error.DatabaseFailure, } const name = options.name orelse host; - try tx.insert("community", .{ + db.insert("community", .{ .id = id, .owner_id = null, .host = host, @@ -94,26 +92,8 @@ pub fn create(db: anytype, origin: []const u8, options: CreateOptions, alloc: st .scheme = scheme, .kind = options.kind, .created_at = DateTime.now(), - }, alloc); + }, alloc) catch return error.DatabaseFailure; - if (options.kind == .local) { - const actor_id = actors.create(tx, "community.actor", id, alloc) catch |err| switch (err) { - error.UsernameContainsInvalidChar, - error.UsernameTooLong, - error.UsernameEmpty, - error.UsernameTaken, - => unreachable, - else => @panic("TODO"), - }; - try tx.exec( - \\UPDATE community - \\SET community_actor_id = $1 - \\WHERE id = $2 - \\LIMIT 1 - , .{ actor_id, id }, alloc); - } - - try tx.commitOrRelease(); return id; } diff --git a/src/api/services/drive.zig b/src/api/services/drive.zig deleted file mode 100644 index d7c3e40..0000000 --- a/src/api/services/drive.zig +++ /dev/null @@ -1,129 +0,0 @@ -const std = 
@import("std"); -const util = @import("util"); -const sql = @import("sql"); - -const Uuid = util.Uuid; -const DateTime = util.DateTime; - -pub const DriveOwner = union(enum) { - user_id: Uuid, - community_id: Uuid, -}; - -pub const DriveEntry = struct { - id: Uuid, - owner_id: Uuid, - name: ?[]const u8, - path: []const u8, - parent_directory_id: ?Uuid, - file_id: ?Uuid, - kind: Kind, -}; - -pub const Kind = enum { - dir, - file, - pub const jsonStringify = util.jsonSerializeEnumAsString; -}; - -pub fn stat(db: anytype, owner: Uuid, path: []const u8, alloc: std.mem.Allocator) !DriveEntry { - return (db.queryRow(DriveEntry, - \\SELECT id, path, owner_id, name, file_id, kind, parent_directory_id - \\FROM drive_entry_path - \\WHERE owner_id = $1 AND path = ('/' || $2) - \\LIMIT 1 - , .{ - owner, - std.mem.trim(u8, path, "/"), - }, alloc) catch |err| switch (err) { - error.NoRows => return error.NotFound, - else => |e| return e, - }); -} - -/// Creates a file or directory -pub fn create(db: anytype, owner: Uuid, dir: []const u8, name: []const u8, file_id: ?Uuid, alloc: std.mem.Allocator) !void { - if (name.len == 0) return error.EmptyName; - - const id = Uuid.randV4(util.getThreadPrng()); - - const tx = try db.begin(); - errdefer tx.rollback(); - - const parent = try stat(tx, owner, dir, alloc); - defer util.deepFree(alloc, parent); - - tx.insert("drive_entry", .{ - .id = id, - .owner_id = owner, - .name = name, - .parent_directory_id = parent.id, - .file_id = file_id, - }, alloc) catch |err| switch (err) { - error.UniqueViolation => return error.PathAlreadyExists, - else => |e| return e, - }; - - try tx.commit(); -} - -pub fn delete(db: anytype, id: Uuid, alloc: std.mem.Allocator) !void { - const tx = try db.beginOrSavepoint(); - errdefer tx.rollback(); - - if ((try tx.queryRow( - std.meta.Tuple(&.{usize}), - \\SELECT COUNT(1) - \\FROM drive_entry - \\WHERE parent_directory_id = $1 - , - .{id}, - alloc, - ))[0] != 0) { - return error.DirectoryNotEmpty; - } - - try 
tx.exec("DELETE FROM drive_entry WHERE id = $1", .{id}, alloc); - - try tx.commitOrRelease(); -} - -pub fn move(db: anytype, owner: Uuid, src: []const u8, dest: []const u8, alloc: std.mem.Allocator) !void { - const tx = try db.beginOrSavepoint(); - errdefer tx.rollback(); - - const val = try stat(tx, owner, src, alloc); - defer util.deepFree(alloc, val); - - if (val.parent_directory_id == null) return error.RootDirectory; - - var split = std.mem.splitBackwards(u8, std.mem.trim(u8, dest, "/"), "/"); - const name = split.first(); - const dir = split.rest(); - - const parent = try stat(tx, owner, dir, alloc); - defer util.deepFree(alloc, parent); - - try tx.exec( - \\UPDATE drive_entry - \\SET name = $1, parent_directory_id = $2 - \\WHERE id = $3 - , - .{ name, parent.id, val.id }, - alloc, - ); - - try tx.commitOrRelease(); -} - -// TODO: paginate this -pub fn list(db: anytype, id: Uuid, alloc: std.mem.Allocator) ![]DriveEntry { - return (db.queryRows(DriveEntry, - \\SELECT id, path, owner_id, name, file_id, kind, parent_directory_id - \\FROM drive_entry_path - \\WHERE parent_directory_id = $1 - , .{id}, null, alloc) catch |err| switch (err) { - error.NoRows => return error.NotFound, - else => |e| return e, - }); -} diff --git a/src/api/services/files.zig b/src/api/services/files.zig index 6f9c08c..147d049 100644 --- a/src/api/services/files.zig +++ b/src/api/services/files.zig @@ -1,156 +1,203 @@ const std = @import("std"); -const sql = @import("sql"); const util = @import("util"); const Uuid = util.Uuid; const DateTime = util.DateTime; -pub const FileStatus = enum { - uploading, - uploaded, - external, - deleted, - pub const jsonStringify = util.jsonSerializeEnumAsString; +pub const FileOwner = union(enum) { + user_id: Uuid, + community_id: Uuid, }; -pub const FileUpload = struct { +pub const DriveFile = struct { id: Uuid, - owner_id: Uuid, + path: []const u8, + filename: []const u8, + + owner: FileOwner, + size: usize, - filename: []const u8, - description: 
?[]const u8, - content_type: ?[]const u8, + description: []const u8, + content_type: []const u8, sensitive: bool, - status: FileStatus, - created_at: DateTime, updated_at: DateTime, }; -pub const FileMeta = struct { +const EntryType = enum { + dir, + file, +}; + +pub const CreateFileArgs = struct { + dir: []const u8, filename: []const u8, + owner: FileOwner, + created_by: Uuid, description: ?[]const u8, content_type: ?[]const u8, sensitive: bool, }; -pub fn get(db: anytype, id: Uuid, alloc: std.mem.Allocator) !FileUpload { - return try db.queryRow( - FileUpload, - \\SELECT - \\ id, - \\ owner_id, - \\ size, - \\ filename, - \\ description, - \\ content_type, - \\ sensitive, - \\ status, - \\ created_at, - \\ updated_at - \\FROM file_upload - \\WHERE id = $1 +fn lookupDirectory(db: anytype, owner: FileOwner, path: []const u8, alloc: std.mem.Allocator) !Uuid { + return (try db.queryRow( + std.meta.Tuple( + &.{util.Uuid}, + ), + \\SELECT id + \\FROM drive_entry_path + \\WHERE + \\ path = (CASE WHEN LENGTH($1) = 0 THEN '/' ELSE '/' || $1 || '/' END) + \\ AND account_owner_id IS NOT DISTINCT FROM $2 + \\ AND community_owner_id IS NOT DISTINCT FROM $3 + \\ AND kind = 'dir' \\LIMIT 1 , - .{id}, + .{ + std.mem.trim(u8, path, "/"), + if (owner == .user_id) owner.user_id else null, + if (owner == .community_id) owner.community_id else null, + }, alloc, - ); + ))[0]; } -pub const PartialMeta = Partial(FileMeta); -pub fn Partial(comptime T: type) type { - const t_fields = std.meta.fields(T); - var fields: [t_fields.len]std.builtin.Type.StructField = undefined; - for (std.meta.fields(T)) |f, i| fields[i] = .{ - .name = f.name, - .field_type = ?f.field_type, - .default_value = &@as(?f.field_type, null), - .is_comptime = false, - .alignment = @alignOf(?f.field_type), - }; - return @Type(.{ .Struct = .{ - .layout = .Auto, - .fields = &fields, - .decls = &.{}, - .is_tuple = false, - } }); +fn lookup(db: anytype, owner: FileOwner, path: []const u8, alloc: std.mem.Allocator) !Uuid { 
+ return (try db.queryRow( + std.meta.Tuple( + &.{util.Uuid}, + ), + \\SELECT id + \\FROM drive_entry_path + \\WHERE + \\ path = (CASE WHEN LENGTH($1) = 0 THEN '/' ELSE '/' || $1 || '/' END) + \\ AND account_owner_id IS NOT DISTINCT FROM $2 + \\ AND community_owner_id IS NOT DISTINCT FROM $3 + \\LIMIT 1 + , + .{ + std.mem.trim(u8, path, "/"), + if (owner == .user_id) owner.user_id else null, + if (owner == .community_id) owner.community_id else null, + }, + alloc, + ))[0]; } -pub fn update(db: anytype, id: Uuid, meta: PartialMeta, alloc: std.mem.Allocator) !void { - var builder = sql.QueryBuilder.init(alloc); - defer builder.deinit(); +pub fn mkdir(db: anytype, owner: FileOwner, path: []const u8, alloc: std.mem.Allocator) !void { + var split = std.mem.splitBackwards(u8, std.mem.trim(u8, path, "/"), "/"); + const name = split.first(); + const dir = split.rest(); + std.log.debug("'{s}' / '{s}'", .{ name, dir }); - try builder.appendSlice("UPDATE file_upload"); + if (name.len == 0) return error.EmptyName; - if (meta.filename) |_| try builder.set("filename", "$2"); - if (meta.description) |_| try builder.set("description", "$3"); - if (meta.content_type) |_| try builder.set("content_type", "$4"); - if (meta.sensitive) |_| try builder.set("sensitive", "$5"); - - if (builder.set_statements_appended == 0) return error.NoChange; - - try builder.andWhere("id = $1"); - - std.log.debug("{any}", .{meta}); - - try db.exec(try builder.terminate(), .{ - id, - meta.filename orelse null, - meta.description orelse null, - meta.content_type orelse null, - meta.sensitive orelse null, - }, alloc); -} - -pub fn create(db: anytype, owner_id: Uuid, meta: FileMeta, data: []const u8, alloc: std.mem.Allocator) !Uuid { const id = Uuid.randV4(util.getThreadPrng()); - const now = DateTime.now(); - try db.insert("file_upload", .{ + + const tx = try db.begin(); + errdefer tx.rollback(); + + const parent = try lookupDirectory(tx, owner, dir, alloc); + + try tx.insert("drive_entry", .{ .id = id, - 
.owner_id = owner_id, - .size = data.len, + .account_owner_id = if (owner == .user_id) owner.user_id else null, + .community_owner_id = if (owner == .community_id) owner.community_id else null, - .filename = meta.filename, - .description = meta.description, - .content_type = meta.content_type, - .sensitive = meta.sensitive, - - .status = FileStatus.uploading, - - .created_at = now, - .updated_at = now, + .name = name, + .parent_directory_id = parent, }, alloc); - - saveFile(id, data) catch |err| { - db.exec("DELETE FROM file_upload WHERE ID = $1", .{id}, alloc) catch |e| { - std.log.err("Unable to remove file {} record in DB: {}", .{ id, e }); - }; - return err; - }; - - try db.exec( - \\UPDATE file_upload - \\SET status = 'uploaded' - \\WHERE id = $1 - , .{id}, alloc); - - return id; + try tx.commit(); } -pub fn delete(db: anytype, id: Uuid, alloc: std.mem.Allocator) !void { - var dir = try std.fs.cwd().openDir(data_root, .{}); - defer dir.close(); +pub fn rmdir(db: anytype, owner: FileOwner, path: []const u8, alloc: std.mem.Allocator) !void { + const tx = try db.begin(); + errdefer tx.rollback(); - try dir.deleteFile(&id.toCharArray()); + const id = try lookupDirectory(tx, owner, path, alloc); + try tx.exec("DELETE FROM drive_directory WHERE id = $1", .{id}, alloc); + try tx.commit(); +} - try db.exec( - \\DELETE FROM file_upload - \\WHERE id = $1 - , .{id}, alloc); +fn insertFileRow(tx: anytype, id: Uuid, filename: []const u8, owner: FileOwner, dir: Uuid, alloc: std.mem.Allocator) !void { + try tx.insert("drive_entry", .{ + .id = id, + + .account_owner_id = if (owner == .user_id) owner.user_id else null, + .community_owner_id = if (owner == .community_id) owner.community_id else null, + + .parent_directory_id = dir, + .name = filename, + + .file_id = id, + }, alloc); +} + +pub fn createFile(db: anytype, args: CreateFileArgs, data: []const u8, alloc: std.mem.Allocator) !void { + const id = Uuid.randV4(util.getThreadPrng()); + const now = DateTime.now(); + + { + 
var tx = try db.begin(); +        errdefer tx.rollback(); + +        const dir_id = try lookupDirectory(tx, args.owner, args.dir, alloc); + +        try tx.insert("file_upload", .{ +            .id = id, + +            .filename = args.filename, + +            .created_by = args.created_by, +            .size = data.len, + +            .description = args.description, +            .content_type = args.content_type, +            .sensitive = args.sensitive, + +            .is_deleted = false, + +            .created_at = now, +            .updated_at = now, +        }, alloc); + +        var sub_tx = try tx.savepoint(); +        if (insertFileRow(sub_tx, id, args.filename, args.owner, dir_id, alloc)) |_| { +            try sub_tx.release(); +        } else |err| { +            std.log.debug("{}", .{err}); +            switch (err) { +                error.UniqueViolation => { +                    try sub_tx.rollbackSavepoint(); +                    // Rename the file before trying again +                    var split = std.mem.split(u8, args.filename, "."); +                    const name = split.first(); +                    const ext = split.rest(); +                    var buf: [256]u8 = undefined; +                    const drive_filename = try std.fmt.bufPrint(&buf, "{s}.{}.{s}", .{ name, id, ext }); +                    try insertFileRow(tx, id, drive_filename, args.owner, dir_id, alloc); +                }, +                else => return error.DatabaseFailure, +            } +        } + +        try tx.commit(); +    } + +    errdefer { +        db.exec("DELETE FROM file_upload WHERE ID = $1", .{id}, alloc) catch |err| { +            std.log.err("Unable to remove file record in DB: {}", .{err}); +        }; +        db.exec("DELETE FROM drive_entry WHERE ID = $1", .{id}, alloc) catch |err| { +            std.log.err("Unable to remove file record in DB: {}", .{err}); +        }; +    } + +    try saveFile(id, data); } const data_root = "./files"; @@ -171,3 +218,17 @@ pub fn deref(alloc: std.mem.Allocator, id: Uuid) ![]const u8 { return dir.readFileAlloc(alloc, &id.toCharArray(), 1 << 32); } + +pub fn deleteFile(db: anytype, alloc: std.mem.Allocator, id: Uuid) !void { +    var dir = try std.fs.cwd().openDir(data_root, .{}); +    defer dir.close(); + +    try dir.deleteFile(&id.toCharArray()); + +    const tx = try db.beginOrSavepoint(); +    errdefer tx.rollback(); + +    tx.exec("DELETE FROM drive_entry WHERE ID = $1", .{id}, alloc) catch 
return error.DatabaseFailure; + tx.exec("DELETE FROM file_upload WHERE ID = $1", .{id}, alloc) catch return error.DatabaseFailure; + try tx.commitOrRelease(); +} diff --git a/src/http/json.zig b/src/http/json.zig index ee6a852..21474cc 100644 --- a/src/http/json.zig +++ b/src/http/json.zig @@ -10,10 +10,10 @@ const Token = std.json.Token; const unescapeValidString = std.json.unescapeValidString; const UnescapeValidStringError = std.json.UnescapeValidStringError; -pub fn parse(comptime T: type, allow_unknown_fields: bool, body: []const u8, alloc: std.mem.Allocator) !T { +pub fn parse(comptime T: type, body: []const u8, alloc: std.mem.Allocator) !T { var tokens = TokenStream.init(body); - const options = ParseOptions{ .allocator = alloc, .ignore_unknown_fields = !allow_unknown_fields }; + const options = ParseOptions{ .allocator = alloc }; const token = (try tokens.next()) orelse return error.UnexpectedEndOfJson; const r = try parseInternal(T, token, &tokens, options); diff --git a/src/http/middleware.zig b/src/http/middleware.zig index f4b4630..ce4d307 100644 --- a/src/http/middleware.zig +++ b/src/http/middleware.zig @@ -672,13 +672,7 @@ const BaseContentType = enum { other, }; -fn parseBodyFromRequest( - comptime T: type, - comptime options: ParseBodyOptions, - content_type: ?[]const u8, - reader: anytype, - alloc: std.mem.Allocator, -) !T { +fn parseBodyFromRequest(comptime T: type, content_type: ?[]const u8, reader: anytype, alloc: std.mem.Allocator) !T { // Use json by default for now for testing purposes const eff_type = content_type orelse "application/json"; const parser_type = matchContentType(eff_type); @@ -687,7 +681,7 @@ fn parseBodyFromRequest( .octet_stream, .json => { const buf = try reader.readAllAlloc(alloc, 1 << 16); defer alloc.free(buf); - const body = try json_utils.parse(T, options.allow_unknown_fields, buf, alloc); + const body = try json_utils.parse(T, buf, alloc); defer json_utils.parseFree(body, alloc); return try util.deepClone(alloc, 
body); @@ -695,14 +689,14 @@ fn parseBodyFromRequest( .url_encoded => { const buf = try reader.readAllAlloc(alloc, 1 << 16); defer alloc.free(buf); - return urlencode.parse(alloc, options.allow_unknown_fields, T, buf) catch |err| switch (err) { + return urlencode.parse(alloc, T, buf) catch |err| switch (err) { //error.NoQuery => error.NoBody, else => err, }; }, .multipart_formdata => { const boundary = fields.getParam(eff_type, "boundary") orelse return error.MissingBoundary; - return try @import("./multipart.zig").parseFormData(T, options.allow_unknown_fields, boundary, reader, alloc); + return try @import("./multipart.zig").parseFormData(T, boundary, reader, alloc); }, else => return error.UnsupportedMediaType, } @@ -720,16 +714,12 @@ fn matchContentType(hdr: []const u8) BaseContentType { return .other; } -pub const ParseBodyOptions = struct { - allow_unknown_fields: bool = false, -}; - /// Parses a set of body arguments from the request body based on the request's Content-Type /// header. /// /// The exact method for parsing depends partially on the Content-Type. 
json types are preferred /// TODO: Need tests for this, including various Content-Type values -pub fn ParseBody(comptime Body: type, comptime options: ParseBodyOptions) type { +pub fn ParseBody(comptime Body: type) type { return struct { pub fn handle(_: @This(), req: anytype, res: anytype, ctx: anytype, next: anytype) !void { const content_type = req.headers.get("Content-Type"); @@ -741,7 +731,7 @@ pub fn ParseBody(comptime Body: type, comptime options: ParseBodyOptions) type { } var stream = req.body orelse return error.NoBody; - const body = try parseBodyFromRequest(Body, options, content_type, stream.reader(), ctx.allocator); + const body = try parseBodyFromRequest(Body, content_type, stream.reader(), ctx.allocator); defer util.deepFree(ctx.allocator, body); return next.handle( @@ -761,7 +751,7 @@ test "parseBodyFromRequest" { const testCase = struct { fn case(content_type: []const u8, body: []const u8, expected: anytype) !void { var stream = std.io.StreamSource{ .const_buffer = std.io.fixedBufferStream(body) }; - const result = try parseBodyFromRequest(@TypeOf(expected), .{}, content_type, stream.reader(), std.testing.allocator); + const result = try parseBodyFromRequest(@TypeOf(expected), content_type, stream.reader(), std.testing.allocator); defer util.deepFree(std.testing.allocator, result); try util.testing.expectDeepEqual(expected, result); @@ -807,7 +797,7 @@ pub fn ParseQueryParams(comptime QueryParams: type) type { return struct { pub fn handle(_: @This(), req: anytype, res: anytype, ctx: anytype, next: anytype) !void { if (QueryParams == void) return next.handle(req, res, addField(ctx, "query_params", {}), {}); - const query = try urlencode.parse(ctx.allocator, true, QueryParams, ctx.query_string); + const query = try urlencode.parse(ctx.allocator, QueryParams, ctx.query_string); defer util.deepFree(ctx.allocator, query); return next.handle( diff --git a/src/http/multipart.zig b/src/http/multipart.zig index e4ccf98..815711d 100644 --- 
a/src/http/multipart.zig +++ b/src/http/multipart.zig @@ -182,7 +182,7 @@ fn Deserializer(comptime Result: type) type { }); } -pub fn parseFormData(comptime T: type, allow_unknown_fields: bool, boundary: []const u8, reader: anytype, alloc: std.mem.Allocator) !T { +pub fn parseFormData(comptime T: type, boundary: []const u8, reader: anytype, alloc: std.mem.Allocator) !T { var form = openForm(try openMultipart(boundary, reader)); var ds = Deserializer(T){}; @@ -196,13 +196,7 @@ pub fn parseFormData(comptime T: type, allow_unknown_fields: bool, boundary: []c var part = (try form.next(alloc)) orelse break; errdefer util.deepFree(alloc, part); - ds.setSerializedField(part.name, part) catch |err| switch (err) { - error.UnknownField => if (allow_unknown_fields) { - util.deepFree(alloc, part); - continue; - } else return err, - else => |e| return e, - }; + try ds.setSerializedField(part.name, part); } return try ds.finish(alloc); diff --git a/src/http/urlencode.zig b/src/http/urlencode.zig index ee671b7..3f49423 100644 --- a/src/http/urlencode.zig +++ b/src/http/urlencode.zig @@ -98,17 +98,13 @@ pub const Iter = struct { /// Would be used to parse a query string like /// `?foo.baz=12345` /// -pub fn parse(alloc: std.mem.Allocator, allow_unknown_fields: bool, comptime T: type, query: []const u8) !T { +pub fn parse(alloc: std.mem.Allocator, comptime T: type, query: []const u8) !T { var iter = Iter.from(query); var deserializer = Deserializer(T){}; while (iter.next()) |pair| { try deserializer.setSerializedField(pair.key, pair.value); - deserializer.setSerializedField(pair.key, pair.value) catch |err| switch (err) { - error.UnknownField => if (allow_unknown_fields) continue else return err, - else => |e| return e, - }; } return try deserializer.finish(alloc); diff --git a/src/main/controllers.zig b/src/main/controllers.zig index d60e0eb..398424c 100644 --- a/src/main/controllers.zig +++ b/src/main/controllers.zig @@ -73,13 +73,6 @@ pub fn EndpointRequest(comptime Endpoint: 
type) type { const Body = if (@hasDecl(Endpoint, "Body")) Endpoint.Body else void; const Query = if (@hasDecl(Endpoint, "Query")) Endpoint.Query else void; - const body_options = .{ - .allow_unknown_fields = if (@hasDecl(Endpoint, "allow_unknown_fields_in_body")) - Endpoint.allow_unknown_fields_in_body - else - false, - }; - allocator: std.mem.Allocator, method: http.Method, @@ -98,7 +91,7 @@ pub fn EndpointRequest(comptime Endpoint: type) type { const body_middleware = //if (Body == void) //mdw.injectContext(.{ .body = {} }) //else - mdw.ParseBody(Body, body_options){}; + mdw.ParseBody(Body){}; const query_middleware = //if (Query == void) //mdw.injectContext(.{ .query_params = {} }) diff --git a/src/main/controllers/api.zig b/src/main/controllers/api.zig index b767e5d..9a76c91 100644 --- a/src/main/controllers/api.zig +++ b/src/main/controllers/api.zig @@ -29,8 +29,4 @@ pub const routes = .{ controllers.apiEndpoint(follows.query_following), controllers.apiEndpoint(drive.upload), controllers.apiEndpoint(drive.mkdir), - controllers.apiEndpoint(drive.get), - controllers.apiEndpoint(drive.delete), - controllers.apiEndpoint(drive.move), - controllers.apiEndpoint(drive.update), }; diff --git a/src/main/controllers/api/drive.zig b/src/main/controllers/api/drive.zig index 15a3f39..f617898 100644 --- a/src/main/controllers/api/drive.zig +++ b/src/main/controllers/api/drive.zig @@ -11,15 +11,46 @@ pub const DriveArgs = struct { path: []const u8, }; -pub const get = struct { +pub const query = struct { pub const method = .GET; pub const path = drive_path; pub const Args = DriveArgs; - pub fn handler(req: anytype, res: anytype, srv: anytype) !void { - const result = try srv.driveGet(req.args.path); + pub const Query = struct { + const OrderBy = enum { + created_at, + filename, + }; - try res.json(.ok, result); + max_items: usize = 20, + + like: ?[]const u8 = null, + + order_by: OrderBy = .created_at, + direction: api.Direction = .descending, + + prev: ?struct { + id: Uuid, + 
order_val: union(OrderBy) { +                created_at: DateTime, +                filename: []const u8, +            }, +        } = null, + +        page_direction: api.PageDirection = .forward, +    }; + +    pub fn handler(req: anytype, res: anytype, srv: anytype) !void { +        const result = srv.driveQuery(req.args.path, req.query) catch |err| switch (err) { +            error.NotADirectory => { +                const meta = try srv.getFile(req.args.path); +                try res.json(.ok, meta); +                return; +            }, +            else => |e| return e, +        }; + +        try controller_utils.paginate(result, res, req.allocator); } }; @@ -36,7 +67,7 @@ pub const upload = struct { pub fn handler(req: anytype, res: anytype, srv: anytype) !void { const f = req.body.file; -        try srv.driveUpload(.{ +        try srv.uploadFile(.{ .dir = req.args.path, .filename = f.filename, .description = req.body.description, @@ -55,7 +86,11 @@ pub const delete = struct { pub const Args = DriveArgs; pub fn handler(req: anytype, res: anytype, srv: anytype) !void { -        try srv.driveDelete(req.args.path); +        const info = try srv.driveLookup(req.args.path); +        if (info == .dir) +            try srv.driveRmdir(req.args.path) +        else if (info == .file) +            try srv.deleteFile(req.args.path); return res.json(.ok, .{}); } @@ -78,23 +113,18 @@ pub const update = struct { pub const path = drive_path; pub const Args = DriveArgs; -    // TODO: Validate that unhandled fields are equivalent to ones in the object -    pub const allow_unknown_fields_in_body = true; pub const Body = struct { -        filename: ?[]const u8 = null, description: ?[]const u8 = null, content_type: ?[]const u8 = null, sensitive: ?bool = null, }; pub fn handler(req: anytype, res: anytype, srv: anytype) !void { -        try srv.driveUpdate(req.args.path, .{ -            .filename = req.body.filename, -            .description = req.body.description, -            .content_type = req.body.content_type, -            .sensitive = req.body.sensitive, -        }); -        try res.json(.ok, .{}); +        const info = try srv.driveLookup(req.args.path); +        if (info != .file) return error.NotFile; + +        const new_info = try srv.updateFile(req.args.path, req.body); +        try res.json(.ok, 
new_info); } }; @@ -104,11 +134,11 @@ pub const move = struct { pub const Args = DriveArgs; pub fn handler(req: anytype, res: anytype, srv: anytype) !void { -        const destination = req.headers.get("Destination") orelse return error.NoDestination; +        const destination = req.fields.get("Destination") orelse return error.NoDestination; try srv.driveMove(req.args.path, destination); -        try res.headers.put("Location", destination); -        try res.json(.created, .{}); +        try res.fields.put("Location", destination); +        try res.json(.created, .{}); } }; diff --git a/src/main/controllers/web.zig b/src/main/controllers/web.zig index 0d0efe8..430e405 100644 --- a/src/main/controllers/web.zig +++ b/src/main/controllers/web.zig @@ -1,5 +1,4 @@ const std = @import("std"); -const util = @import("util"); const controllers = @import("../controllers.zig"); pub const routes = .{ @@ -8,7 +7,6 @@ pub const routes = .{ controllers.apiEndpoint(login), controllers.apiEndpoint(global_timeline), controllers.apiEndpoint(cluster.overview), -    controllers.apiEndpoint(media), }; const index = struct { @@ -89,24 +87,3 @@ const cluster = struct { } }; }; - -const media = struct { -    pub const path = "/media/:id"; -    pub const method = .GET; - -    pub const Args = struct { -        id: util.Uuid, -    }; - -    pub fn handler(req: anytype, res: anytype, srv: anytype) !void { -        const result = try srv.fileDereference(req.args.id); -        defer util.deepFree(srv.allocator, result); - -        try res.headers.put("Content-Type", result.meta.content_type orelse "application/octet-stream"); -        var stream = try res.open(.ok); -        defer stream.close(); - -        try stream.writer().writeAll(result.data); -        try stream.finish(); -    } -}; diff --git a/src/main/migrations.zig b/src/main/migrations.zig index 86ecaed..a7465cb 100644 --- a/src/main/migrations.zig +++ b/src/main/migrations.zig @@ -70,9 +70,8 @@ const create_migration_table = \\); ; -// NOTE: I might fuck with these until the v0.1 release.
After that, I'll guarantee that you -// can upgrade to any v0.x release by just running unapplied migrations in order. You might -// need extra work to upgrade to v1.0 but you shouldn't have to recreate the db. +// NOTE: Until the first public release, i may collapse multiple +// migrations into a single one. this will require db recreation const migrations: []const Migration = &.{ .{ .name = "accounts and actors", @@ -213,7 +212,7 @@ const migrations: []const Migration = &.{ \\CREATE TABLE file_upload( \\ id UUID NOT NULL PRIMARY KEY, \\ - \\ owner_id UUID REFERENCES actor(id), + \\ created_by UUID REFERENCES account(id), \\ size INTEGER NOT NULL, \\ \\ filename TEXT NOT NULL, @@ -221,16 +220,17 @@ const migrations: []const Migration = &.{ \\ content_type TEXT, \\ sensitive BOOLEAN NOT NULL, \\ - \\ status TEXT NOT NULL, + \\ is_deleted BOOLEAN NOT NULL DEFAULT FALSE, \\ - \\ created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, - \\ updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP + \\ created_at TIMESTAMPTZ NOT NULL, + \\ updated_at TIMESTAMPTZ NOT NULL \\); \\ \\CREATE TABLE drive_entry( \\ id UUID NOT NULL PRIMARY KEY, \\ - \\ owner_id UUID REFERENCES actor(id), + \\ account_owner_id UUID REFERENCES account(id), + \\ community_owner_id UUID REFERENCES community(id), \\ \\ name TEXT, \\ parent_directory_id UUID REFERENCES drive_entry(id), @@ -238,6 +238,10 @@ const migrations: []const Migration = &.{ \\ file_id UUID REFERENCES file_upload(id), \\ \\ CHECK( + \\ (account_owner_id IS NULL AND community_owner_id IS NOT NULL) + \\ OR (account_owner_id IS NOT NULL AND community_owner_id IS NULL) + \\ ), + \\ CHECK( \\ (name IS NULL AND parent_directory_id IS NULL AND file_id IS NULL) \\ OR (name IS NOT NULL AND parent_directory_id IS NOT NULL) \\ ) @@ -246,7 +250,7 @@ const migrations: []const Migration = &.{ \\ON drive_entry( \\ name, \\ COALESCE(parent_directory_id, ''), - \\ owner_id + \\ COALESCE(account_owner_id, community_owner_id) \\); , .down = 
@@ -261,41 +265,44 @@ const migrations: []const Migration = &.{ \\CREATE VIEW drive_entry_path( \\ id, \\ path, - \\ owner_id, - \\ name, - \\ parent_directory_id, - \\ file_id, + \\ account_owner_id, + \\ community_owner_id, \\ kind \\) AS WITH RECURSIVE full_path( \\ id, \\ path, - \\ owner_id + \\ account_owner_id, + \\ community_owner_id, + \\ kind \\) AS ( \\ SELECT \\ id, \\ '' AS path, - \\ owner_id + \\ account_owner_id, + \\ community_owner_id, + \\ 'dir' AS kind \\ FROM drive_entry \\ WHERE parent_directory_id IS NULL \\ UNION ALL \\ SELECT \\ base.id, \\ (dir.path || '/' || base.name) AS path, - \\ base.owner_id + \\ base.account_owner_id, + \\ base.community_owner_id, + \\ (CASE WHEN base.file_id IS NULL THEN 'dir' ELSE 'file' END) as kind \\ FROM drive_entry AS base \\ JOIN full_path AS dir ON \\ base.parent_directory_id = dir.id - \\ AND base.owner_id = dir.owner_id + \\ AND base.account_owner_id IS NOT DISTINCT FROM dir.account_owner_id + \\ AND base.community_owner_id IS NOT DISTINCT FROM dir.community_owner_id \\) \\SELECT - \\ full_path.id, - \\ (CASE WHEN LENGTH(full_path.path) = 0 THEN '/' ELSE full_path.path END) AS path, - \\ full_path.owner_id, - \\ drive_entry.name, - \\ drive_entry.parent_directory_id, - \\ drive_entry.file_id, - \\ (CASE WHEN drive_entry.file_id IS NULL THEN 'dir' ELSE 'file' END) as kind - \\FROM full_path JOIN drive_entry ON full_path.id = drive_entry.id; + \\ id, + \\ (CASE WHEN kind = 'dir' THEN path || '/' ELSE path END) AS path, + \\ account_owner_id, + \\ community_owner_id, + \\ kind + \\FROM full_path; , .down = \\DROP VIEW drive_entry_path; @@ -306,40 +313,34 @@ const migrations: []const Migration = &.{ .up = \\INSERT INTO drive_entry( \\ id, - \\ owner_id, + \\ account_owner_id, + \\ community_owner_id, \\ parent_directory_id, \\ name, \\ file_id \\) SELECT \\ id, - \\ id AS owner_id, + \\ id AS account_owner_id, + \\ NULL AS community_owner_id, \\ NULL AS parent_directory_id, \\ NULL AS name, \\ NULL AS file_id 
- \\FROM actor; - , - .down = "", - }, - .{ - .name = "community actors", - .up = "ALTER TABLE community ADD COLUMN community_actor_id UUID REFERENCES actor(id)", - .down = "ALTER COLUMN community DROP COLUMN community_actor_id", - }, - .{ - .name = "create community actors", - .up = - \\INSERT INTO actor( + \\FROM account; + \\INSERT INTO drive_entry( \\ id, - \\ username, - \\ community_id, - \\ created_at + \\ account_owner_id, + \\ community_owner_id, + \\ parent_directory_id, + \\ name, + \\ file_id \\) SELECT \\ id, - \\ host AS username, - \\ id AS community_id, - \\ CURRENT_TIMESTAMP AS created_at + \\ NULL AS account_owner_id, + \\ id AS community_owner_id, + \\ NULL AS parent_directory_id, + \\ NULL AS name, + \\ NULL AS file_id \\FROM community; - \\UPDATE community SET community_actor_id = id; , .down = "", }, diff --git a/src/sql/engines/common.zig b/src/sql/engines/common.zig index d5efad1..93169c4 100644 --- a/src/sql/engines/common.zig +++ b/src/sql/engines/common.zig @@ -124,7 +124,6 @@ pub fn parseValueNotNull(alloc: ?Allocator, comptime T: type, str: []const u8) ! 
return error.ResultTypeMismatch; }, .Optional => try parseValueNotNull(alloc, std.meta.Child(T), str), - .Bool => return util.serialize.bool_map.get(str) orelse return error.ResultTypeMismatch, else => @compileError("Type " ++ @typeName(T) ++ " not supported"), }, diff --git a/src/sql/engines/sqlite.zig b/src/sql/engines/sqlite.zig index a69de93..3b9c8c4 100644 --- a/src/sql/engines/sqlite.zig +++ b/src/sql/engines/sqlite.zig @@ -341,7 +341,6 @@ fn getColumnInt(stmt: *c.sqlite3_stmt, comptime T: type, idx: u15) common.GetErr std.log.err("SQLite column {}: Expected value of type {}, got {} (outside of range)", .{ idx, T, val }); return error.ResultTypeMismatch; }, - .Bool => if (val == 0) return false else return true, else => { std.log.err("SQLite column {}: Storage class INT cannot be parsed into type {}", .{ idx, T }); return error.ResultTypeMismatch; diff --git a/src/sql/lib.zig b/src/sql/lib.zig index ba583f2..69c371c 100644 --- a/src/sql/lib.zig +++ b/src/sql/lib.zig @@ -24,8 +24,6 @@ pub const QueryRowError = errors.QueryRowError; pub const BeginError = errors.BeginError; pub const CommitError = errors.CommitError; -pub const DatabaseError = QueryError || RowError || QueryRowError || BeginError || CommitError; - pub const QueryOptions = common.QueryOptions; pub const Engine = enum { @@ -39,7 +37,6 @@ pub const Engine = enum { pub const QueryBuilder = struct { array: std.ArrayList(u8), where_clauses_appended: usize = 0, - set_statements_appended: usize = 0, pub fn init(alloc: std.mem.Allocator) QueryBuilder { return QueryBuilder{ .array = std.ArrayList(u8).init(alloc) }; @@ -61,7 +58,7 @@ pub const QueryBuilder = struct { /// interspersed with calls to appendSlice pub fn andWhere(self: *QueryBuilder, comptime clause: []const u8) !void { if (self.where_clauses_appended == 0) { - try self.array.appendSlice("\nWHERE "); + try self.array.appendSlice("WHERE "); } else { try self.array.appendSlice(" AND "); } @@ -70,17 +67,6 @@ pub const QueryBuilder = struct { 
self.where_clauses_appended += 1; } - pub fn set(self: *QueryBuilder, comptime col: []const u8, comptime val: []const u8) !void { - if (self.set_statements_appended == 0) { - try self.array.appendSlice("\nSET "); - } else { - try self.array.appendSlice(", "); - } - - try self.array.appendSlice(col ++ " = " ++ val); - self.set_statements_appended += 1; - } - pub fn str(self: *const QueryBuilder) []const u8 { return self.array.items; } @@ -537,17 +523,6 @@ fn Tx(comptime tx_level: u8) type { return row; } - pub fn queryRows( - self: Self, - comptime RowType: type, - q: [:0]const u8, - args: anytype, - max_items: ?usize, - alloc: std.mem.Allocator, - ) QueryRowError![]RowType { - return try self.queryRowsWithOptions(RowType, q, args, max_items, .{ .allocator = alloc }); - } - // Runs a query to completion and returns the results as a slice pub fn queryRowsWithOptions( self: Self, diff --git a/src/util/serialize.zig b/src/util/serialize.zig index 4379b45..53d882f 100644 --- a/src/util/serialize.zig +++ b/src/util/serialize.zig @@ -242,7 +242,7 @@ pub fn DeserializerContext(comptime Result: type, comptime From: type, comptime }; } -pub const bool_map = std.ComptimeStringMap(bool, .{ +const bool_map = std.ComptimeStringMap(bool, .{ .{ "true", true }, .{ "t", true }, .{ "yes", true },