From 55dbdff6e573773382aca5882ae5761e6abb2968 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Quei=C3=9Fner?= <git@random-projects.net> Date: Tue, 4 Mar 2025 20:14:44 +0100 Subject: [PATCH 01/26] Starts to refactor disk-image-step into an actual standalone tool with build.zig support --- build.zig | 1045 ++------------------------------------------ concept/cli.txt | 8 + concept/script.dis | 31 ++ src/build.old.zig | 1019 ++++++++++++++++++++++++++++++++++++++++++ src/dim.zig | 16 + src/tokenizer.zig | 153 +++++++ 6 files changed, 1265 insertions(+), 1007 deletions(-) create mode 100644 concept/cli.txt create mode 100644 concept/script.dis create mode 100644 src/build.old.zig create mode 100644 src/dim.zig create mode 100644 src/tokenizer.zig diff --git a/build.zig b/build.zig index bbc14d4..a421305 100644 --- a/build.zig +++ b/build.zig @@ -1,1019 +1,50 @@ const std = @import("std"); const builtin = @import("builtin"); -fn root() []const u8 { - return comptime (std.fs.path.dirname(@src().file) orelse "."); -} -const build_root = root(); - -pub const KiB = 1024; -pub const MiB = 1024 * KiB; -pub const GiB = 1024 * MiB; - -fn usageDemo( - b: *std.Build, - dependency: *std.Build.Dependency, - debug_step: *std.Build.Step, -) void { - installDebugDisk(dependency, debug_step, "uninitialized.img", 50 * MiB, .uninitialized); - - installDebugDisk(dependency, debug_step, "empty-mbr.img", 50 * MiB, .{ - .mbr = .{ - .partitions = .{ - null, - null, - null, - null, - }, - }, - }); - - installDebugDisk(dependency, debug_step, "manual-offset-mbr.img", 50 * MiB, .{ - .mbr = .{ - .partitions = .{ - &.{ .offset = 2048 + 0 * 10 * MiB, .size = 10 * MiB, .bootable = true, .type = .fat32_lba, .data = .uninitialized }, - &.{ .offset = 2048 + 1 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .ntfs, .data = .uninitialized }, - &.{ .offset = 2048 + 2 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .linux_swap, .data = .uninitialized }, - &.{ .offset = 2048 + 3 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .linux_fs, .data = .uninitialized }, - }, - }, - }); - - installDebugDisk(dependency, debug_step, "auto-offset-mbr.img", 50 * MiB, .{ - .mbr = .{ - .partitions = .{ - &.{ .size = 7 * MiB, .bootable = true, .type = .fat32_lba, .data = .uninitialized }, - &.{ .size = 8 * MiB, .bootable = false, .type = .ntfs, .data = .uninitialized }, - &.{ .size = 9 * MiB, .bootable = false, .type = .linux_swap, .data = .uninitialized }, - &.{ .size = 10 * MiB, .bootable = false, .type = .linux_fs, .data = .uninitialized }, - }, - }, - }); - - installDebugDisk(dependency, debug_step, "empty-fat32.img", 50 * MiB, .{ - .fs = .{ - .format = .fat32, - .label = "EMPTY", - .items = &.{}, - }, - }); - - installDebugDisk(dependency, debug_step, "initialized-fat32.img", 50 * MiB, .{ - .fs = .{ - .format = .fat32, - .label = "ROOTFS", - .items = &.{ - .{ .empty_dir = "boot/EFI/refind/icons" }, - .{ .empty_dir = "/boot/EFI/nixos/.extra-files/" }, - .{ .empty_dir = "Users/xq/" }, - .{ .copy_dir = .{ .source = b.path("dummy/Windows"), .destination = "Windows" } }, - .{ .copy_file = .{ .source = b.path("dummy/README.md"), .destination = "Users/xq/README.md" } }, - }, - }, - }); - - installDebugDisk(dependency, debug_step, "initialized-fat32-in-mbr-partitions.img", 100 * MiB, .{ - .mbr = .{ - .partitions = .{ - &.{ - .size = 90 * MiB, - .bootable = true, - .type = .fat32_lba, - .data = .{ - .fs = .{ - .format = .fat32, - .label = "ROOTFS", - .items = &.{ - .{ .empty_dir = "boot/EFI/refind/icons" }, - .{ .empty_dir = 
"/boot/EFI/nixos/.extra-files/" }, - .{ .empty_dir = "Users/xq/" }, - .{ .copy_dir = .{ .source = b.path("dummy/Windows"), .destination = "Windows" } }, - .{ .copy_file = .{ .source = b.path("dummy/README.md"), .destination = "Users/xq/README.md" } }, - }, - }, - }, - }, - null, - null, - null, - }, - }, - }); - - // TODO: Implement GPT partition support - // installDebugDisk(debug_step, "empty-gpt.img", 50 * MiB, .{ - // .gpt = .{ - // .partitions = &.{}, - // }, - // }); -} - pub fn build(b: *std.Build) void { - // Steps: + const target = b.standardTargetOptions(.{}); + const optimize = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseSafe }); + + const test_step = b.step("test", "Runs the test suite."); + + // // Dependency Setup: + // const zfat_dep = b.dependency("zfat", .{ + // // .max_long_name_len = 121, + // .code_page = .us, + // .@"volume-count" = @as(u32, 1), + // .@"sector-size" = @as(u32, 512), + // // .rtc = .dynamic, + // .mkfs = true, + // .exfat = true, + // }); - const debug_step = b.step("debug", "Builds a basic exemplary disk image."); + // const zfat_mod = zfat_dep.module("zfat"); - // Dependency Setup: + // const mkfs_fat = b.addExecutable(.{ + // .name = "mkfs.fat", + // .target = b.graph.host, + // .optimize = .ReleaseSafe, + // .root_source_file = b.path("src/mkfs.fat.zig"), + // }); + // mkfs_fat.root_module.addImport("fat", zfat_mod); + // mkfs_fat.linkLibC(); + // b.installArtifact(mkfs_fat); - const zfat_dep = b.dependency("zfat", .{ - // .max_long_name_len = 121, - .code_page = .us, - .@"volume-count" = @as(u32, 1), - .@"sector-size" = @as(u32, 512), - // .rtc = .dynamic, - .mkfs = true, - .exfat = true, + const dim_mod = b.addModule("dim", .{ + .root_source_file = b.path("src/dim.zig"), + .target = target, + .optimize = optimize, }); - const zfat_mod = zfat_dep.module("zfat"); - - const mkfs_fat = b.addExecutable(.{ - .name = "mkfs.fat", - .target = b.graph.host, - .optimize = .ReleaseSafe, - .root_source_file = b.path("src/mkfs.fat.zig"), + const dim_exe = b.addExecutable(.{ + .name = "dim", + .root_module = dim_mod, }); - mkfs_fat.root_module.addImport("fat", zfat_mod); - mkfs_fat.linkLibC(); - b.installArtifact(mkfs_fat); - - // Usage: - var self_dep: std.Build.Dependency = .{ - .builder = b, - }; - usageDemo(b, &self_dep, debug_step); -} + b.installArtifact(dim_exe); -fn resolveFilesystemMaker(dependency: *std.Build.Dependency, fs: FileSystem.Format) std.Build.LazyPath { - return switch (fs) { - .fat12, .fat16, .fat32, .exfat => dependency.artifact("mkfs.fat").getEmittedBin(), - - .custom => |path| path, - - else => std.debug.panic("Unsupported builtin file system: {s}", .{@tagName(fs)}), - }; -} - -fn relpath(b: *std.Build, path: []const u8) std.Build.LazyPath { - return .{ - .cwd_relative = b.pathFromRoot(path), - }; -} - -fn installDebugDisk( - dependency: *std.Build.Dependency, - install_step: *std.Build.Step, - name: []const u8, - size: u64, - content: Content, -) void { - const initialize_disk = initializeDisk(dependency, size, content); - const install_disk = install_step.owner.addInstallFile(initialize_disk.getImageFile(), name); - install_step.dependOn(&install_disk.step); -} - -pub fn initializeDisk(dependency: *std.Build.Dependency, size: u64, content: Content) *InitializeDiskStep { - const ids = dependency.builder.allocator.create(InitializeDiskStep) catch @panic("out of memory"); - - ids.* = .{ - .step = std.Build.Step.init(.{ - .owner = dependency.builder, // TODO: Is this correct? 
- .id = .custom, - .name = "initialize disk", - .makeFn = InitializeDiskStep.make, - .first_ret_addr = @returnAddress(), - .max_rss = 0, - }), - .disk_file = .{ .step = &ids.step }, - .content = content.dupe(dependency.builder) catch @panic("out of memory"), - .size = size, - }; - - ids.content.resolveFileSystems(dependency); - - ids.content.pushDependenciesTo(&ids.step); - - return ids; + const dim_tests = b.addTest(.{ + .root_module = dim_mod, + }); + const run_dim_tests = b.addRunArtifact(dim_tests); + test_step.dependOn(&run_dim_tests.step); } - -pub const InitializeDiskStep = struct { - const IoPump = std.fifo.LinearFifo(u8, .{ .Static = 8192 }); - - step: std.Build.Step, - - content: Content, - size: u64, - - disk_file: std.Build.GeneratedFile, - - pub fn getImageFile(ids: *InitializeDiskStep) std.Build.LazyPath { - return .{ .generated = .{ - .file = &ids.disk_file, - } }; - } - - fn addDirectoryToCache(b: *std.Build, manifest: *std.Build.Cache.Manifest, parent: std.fs.Dir, path: []const u8) !void { - var dir = try parent.openDir(path, .{ .iterate = true }); - defer dir.close(); - - var walker = try dir.walk(b.allocator); - defer walker.deinit(); - - while (try walker.next()) |entry| { - switch (entry.kind) { - .file => { - const abs_path = try entry.dir.realpathAlloc(b.allocator, entry.basename); - defer b.allocator.free(abs_path); - _ = try manifest.addFile(abs_path, null); - }, - .directory => try addDirectoryToCache(b, manifest, entry.dir, entry.basename), - - else => return error.Unsupported, - } - } - } - - fn addToCacheManifest(b: *std.Build, asking: *std.Build.Step, manifest: *std.Build.Cache.Manifest, content: Content) !void { - manifest.hash.addBytes(@tagName(content)); - switch (content) { - .uninitialized => {}, - - .mbr => |table| { // MbrTable - manifest.hash.addBytes(&table.bootloader); - for (table.partitions) |part_or_null| { - const part = part_or_null orelse { - manifest.hash.addBytes("none"); - break; - }; - manifest.hash.add(part.bootable); - manifest.hash.add(part.offset orelse 0x04_03_02_01); - manifest.hash.add(part.size); - manifest.hash.add(part.type); - try addToCacheManifest(b, asking, manifest, part.data); - } - }, - - .gpt => |table| { // GptTable - manifest.hash.addBytes(&table.disk_id); - - for (table.partitions) |part| { - manifest.hash.addBytes(&part.part_id); - manifest.hash.addBytes(&part.type); - manifest.hash.addBytes(std.mem.sliceAsBytes(&part.name)); - - manifest.hash.add(part.offset orelse 0x04_03_02_01); - manifest.hash.add(part.size); - - manifest.hash.add(@as(u32, @bitCast(part.attributes))); - - try addToCacheManifest(b, asking, manifest, part.data); - } - }, - - .fs => |fs| { // FileSystem - manifest.hash.add(@as(u64, fs.items.len)); - manifest.hash.addBytes(@tagName(fs.format)); - manifest.hash.addBytes(fs.executable.?.getPath2(b, asking)); - - // TODO: Properly add internal file system - for (fs.items) |entry| { - manifest.hash.addBytes(@tagName(entry)); - switch (entry) { - .empty_dir => |dir| { - manifest.hash.addBytes(dir); - }, - .copy_dir => |dir| { - manifest.hash.addBytes(dir.destination); - try addDirectoryToCache(b, manifest, std.fs.cwd(), dir.source.getPath2(b, asking)); - }, - .copy_file => |file| { - manifest.hash.addBytes(file.destination); - _ = try manifest.addFile(file.source.getPath2(b, asking), null); - }, - } - } - }, - .data => |data| { - const path = data.getPath2(b, asking); - _ = try manifest.addFile(path, null); - }, - .binary => |binary| { - const path = binary.getEmittedBin().getPath2(b, asking); - _ = try 
manifest.addFile(path, null); - }, - } - } - - const HumanContext = std.BoundedArray(u8, 256); - - const DiskImage = struct { - path: []const u8, - handle: *std.fs.File, - }; - - fn writeDiskImage(b: *std.Build, asking: *std.Build.Step, disk: DiskImage, base: u64, length: u64, content: Content, context: *HumanContext) !void { - try disk.handle.seekTo(base); - - const context_len = context.len; - defer context.len = context_len; - - context.appendSliceAssumeCapacity("."); - context.appendSliceAssumeCapacity(@tagName(content)); - - switch (content) { - .uninitialized => {}, - - .mbr => |table| { // MbrTable - { - var boot_sector: [512]u8 = .{0} ** 512; - - @memcpy(boot_sector[0..table.bootloader.len], &table.bootloader); - - std.mem.writeInt(u32, boot_sector[0x1B8..0x1BC], if (table.disk_id) |disk_id| disk_id else 0x0000_0000, .little); - std.mem.writeInt(u16, boot_sector[0x1BC..0x1BE], 0x0000, .little); - - var all_auto = true; - var all_manual = true; - for (table.partitions) |part_or_null| { - const part = part_or_null orelse continue; - - if (part.offset != null) { - all_auto = false; - } else { - all_manual = false; - } - } - - if (!all_auto and !all_manual) { - std.log.err("{s}: not all partitions have an explicit offset!", .{context.slice()}); - return error.InvalidSectorBoundary; - } - - const part_base = 0x01BE; - var auto_offset: u64 = 2048; - for (table.partitions, 0..) |part_or_null, part_id| { - const reset_len = context.len; - defer context.len = reset_len; - - var buffer: [64]u8 = undefined; - context.appendSliceAssumeCapacity(std.fmt.bufPrint(&buffer, "[{}]", .{part_id}) catch unreachable); - - const desc = boot_sector[part_base + 16 * part_id ..][0..16]; - - if (part_or_null) |part| { - // https://wiki.osdev.org/MBR#Partition_table_entry_format - - const part_offset = part.offset orelse auto_offset; - - if ((part_offset % 512) != 0) { - std.log.err("{s}: .offset is not divisible by 512!", .{context.slice()}); - return error.InvalidSectorBoundary; - } - if ((part.size % 512) != 0) { - std.log.err("{s}: .size is not divisible by 512!", .{context.slice()}); - return error.InvalidSectorBoundary; - } - - const lba_u64 = @divExact(part_offset, 512); - const size_u64 = @divExact(part.size, 512); - - const lba = std.math.cast(u32, lba_u64) orelse { - std.log.err("{s}: .offset is out of bounds!", .{context.slice()}); - return error.InvalidSectorBoundary; - }; - const size = std.math.cast(u32, size_u64) orelse { - std.log.err("{s}: .size is out of bounds!", .{context.slice()}); - return error.InvalidSectorBoundary; - }; - - desc[0] = if (part.bootable) 0x80 else 0x00; - - desc[1..4].* = mbr.encodeMbrChsEntry(lba); // chs_start - desc[4] = @intFromEnum(part.type); - desc[5..8].* = mbr.encodeMbrChsEntry(lba + size - 1); // chs_end - std.mem.writeInt(u32, desc[8..12], lba, .little); // lba_start - std.mem.writeInt(u32, desc[12..16], size, .little); // block_count - - auto_offset += part.size; - } else { - @memset(desc, 0); // inactive - } - } - boot_sector[0x01FE] = 0x55; - boot_sector[0x01FF] = 0xAA; - - try disk.handle.writeAll(&boot_sector); - } - - { - var auto_offset: u64 = 2048; - for (table.partitions, 0..) 
|part_or_null, part_id| { - const part = part_or_null orelse continue; - - const reset_len = context.len; - defer context.len = reset_len; - - var buffer: [64]u8 = undefined; - context.appendSliceAssumeCapacity(std.fmt.bufPrint(&buffer, "[{}]", .{part_id}) catch unreachable); - - try writeDiskImage(b, asking, disk, base + auto_offset, part.size, part.data, context); - - auto_offset += part.size; - } - } - }, - - .gpt => |table| { // GptTable - _ = table; - std.log.err("{s}: GPT partition tables not supported yet!", .{context.slice()}); - return error.GptUnsupported; - }, - - .fs => |fs| { - const maker_exe = fs.executable.?.getPath2(b, asking); - - try disk.handle.sync(); - - // const disk_image_path = switch (builtin.os.tag) { - // .linux => blk: { - // const self_pid = std.os.linux.getpid(); - // break :blk b.fmt("/proc/{}/fd/{}", .{ self_pid, disk.handle }); - // }, - - // else => @compileError("TODO: Support this on other OS as well!"), - // }; - - var argv = std.ArrayList([]const u8).init(b.allocator); - defer argv.deinit(); - - try argv.appendSlice(&.{ - maker_exe, // exe - disk.path, // image file - b.fmt("0x{X:0>8}", .{base}), // filesystem offset (bytes) - b.fmt("0x{X:0>8}", .{length}), // filesystem length (bytes) - @tagName(fs.format), // filesystem type - "format", // cmd 1: format the disk - "mount", // cmd 2: mount it internally - }); - - for (fs.items) |item| { - switch (item) { - .empty_dir => |dir| { - try argv.append(b.fmt("mkdir;{s}", .{dir})); - }, - .copy_dir => |src_dst| { - try argv.append(b.fmt("dir;{s};{s}", .{ - src_dst.source.getPath2(b, asking), - src_dst.destination, - })); - }, - .copy_file => |src_dst| { - try argv.append(b.fmt("file;{s};{s}", .{ - src_dst.source.getPath2(b, asking), - src_dst.destination, - })); - }, - } - } - - // use shared access to the file: - const stdout = b.run(argv.items); - - try disk.handle.sync(); - - _ = stdout; - }, - - .data => |data| { - const path = data.getPath2(b, asking); - try copyFileToImage(disk, length, std.fs.cwd(), path, context.slice()); - }, - - .binary => |binary| { - const path = binary.getEmittedBin().getPath2(b, asking); - try copyFileToImage(disk, length, std.fs.cwd(), path, context.slice()); - }, - } - } - - fn copyFileToImage(disk: DiskImage, max_length: u64, dir: std.fs.Dir, path: []const u8, context: []const u8) !void { - errdefer std.log.err("{s}: failed to copy data to image.", .{context}); - - var file = try dir.openFile(path, .{}); - defer file.close(); - - const stat = try file.stat(); - if (stat.size > max_length) { - var realpath_buffer: [std.fs.max_path_bytes]u8 = undefined; - std.log.err("{s}: The file '{!s}' exceeds the size of the container. 
The file is {:.2} large, while the container only allows for {:.2}.", .{ - context, - dir.realpath(path, &realpath_buffer), - std.fmt.fmtIntSizeBin(stat.size), - std.fmt.fmtIntSizeBin(max_length), - }); - return error.FileTooLarge; - } - - var pumper = IoPump.init(); - - try pumper.pump(file.reader(), disk.handle.writer()); - - const padding = max_length - stat.size; - if (padding > 0) { - try disk.handle.writer().writeByteNTimes(' ', padding); - } - } - - fn make(step: *std.Build.Step, options: std.Build.Step.MakeOptions) !void { - const b = step.owner; - _ = options; - - const ids: *InitializeDiskStep = @fieldParentPtr("step", step); - - var man = b.graph.cache.obtain(); - defer man.deinit(); - - man.hash.addBytes(&.{ 232, 8, 75, 249, 2, 210, 51, 118, 171, 12 }); // Change when impl changes - - try addToCacheManifest(b, step, &man, ids.content); - - step.result_cached = try step.cacheHit(&man); - const digest = man.final(); - - const output_components = .{ "o", &digest, "disk.img" }; - const output_sub_path = b.pathJoin(&output_components); - const output_sub_dir_path = std.fs.path.dirname(output_sub_path).?; - b.cache_root.handle.makePath(output_sub_dir_path) catch |err| { - return step.fail("unable to make path '{}{s}': {s}", .{ - b.cache_root, output_sub_dir_path, @errorName(err), - }); - }; - - ids.disk_file.path = try b.cache_root.join(b.allocator, &output_components); - - if (step.result_cached) - return; - - { - const disk_path = ids.disk_file.path.?; - - var disk = try std.fs.cwd().createFile(disk_path, .{}); - defer disk.close(); - - try disk.seekTo(ids.size - 1); - try disk.writeAll("\x00"); - try disk.seekTo(0); - - var context: HumanContext = .{}; - context.appendSliceAssumeCapacity("disk"); - - const disk_image = DiskImage{ - .path = disk_path, - .handle = &disk, - }; - - try writeDiskImage(b, step, disk_image, 0, ids.size, ids.content, &context); - } - - // if (!step.result_cached) - try step.writeManifest(&man); - } -}; - -pub const Content = union(enum) { - uninitialized, - - mbr: mbr.Table, - gpt: gpt.Table, - - fs: FileSystem, - - data: std.Build.LazyPath, - - binary: *std.Build.Step.Compile, - - pub fn dupe(content: Content, b: *std.Build) !Content { - const allocator = b.allocator; - - switch (content) { - .uninitialized => return content, - .mbr => |table| { - var copy = table; - for (©.partitions) |*part| { - if (part.*) |*p| { - const buf = try b.allocator.create(mbr.Partition); - buf.* = p.*.*; - buf.data = try buf.data.dupe(b); - p.* = buf; - } - } - return .{ .mbr = copy }; - }, - .gpt => |table| { - var copy = table; - const partitions = try allocator.dupe(gpt.Partition, table.partitions); - for (partitions) |*part| { - part.data = try part.data.dupe(b); - } - copy.partitions = partitions; - return .{ .gpt = copy }; - }, - .fs => |fs| { - var copy = fs; - - copy.label = try allocator.dupe(u8, fs.label); - const items = try allocator.dupe(FileSystem.Item, fs.items); - for (items) |*item| { - switch (item.*) { - .empty_dir => |*dir| { - dir.* = try allocator.dupe(u8, dir.*); - }, - .copy_dir, .copy_file => |*cp| { - const cp_new: FileSystem.Copy = .{ - .destination = try allocator.dupe(u8, cp.destination), - .source = cp.source.dupe(b), - }; - cp.* = cp_new; - }, - } - } - copy.items = items; - - switch (copy.format) { - .custom => |*path| path.* = path.dupe(b), - else => {}, - } - - return .{ .fs = copy }; - }, - .data => |data| { - return .{ .data = data.dupe(b) }; - }, - .binary => |binary| { - return .{ .binary = binary }; - }, - } - } - - pub fn 
pushDependenciesTo(content: Content, step: *std.Build.Step) void { - switch (content) { - .uninitialized => {}, - .mbr => |table| { - for (table.partitions) |part| { - if (part) |p| { - p.data.pushDependenciesTo(step); - } - } - }, - .gpt => |table| { - for (table.partitions) |part| { - part.data.pushDependenciesTo(step); - } - }, - .fs => |fs| { - for (fs.items) |item| { - switch (item) { - .empty_dir => {}, - .copy_dir, .copy_file => |*cp| { - cp.source.addStepDependencies(step); - }, - } - } - if (fs.format == .custom) { - fs.format.custom.addStepDependencies(step); - } - fs.executable.?.addStepDependencies(step); // Must be resolved already, invoke resolveFileSystems before! - }, - .data => |data| data.addStepDependencies(step), - .binary => |binary| step.dependOn(&binary.step), - } - } - - pub fn resolveFileSystems(content: *Content, dependency: *std.Build.Dependency) void { - switch (content.*) { - .uninitialized => {}, - .mbr => |*table| { - for (&table.partitions) |*part| { - if (part.*) |p| { - @constCast(&p.data).resolveFileSystems(dependency); - } - } - }, - .gpt => |*table| { - for (table.partitions) |*part| { - @constCast(&part.data).resolveFileSystems(dependency); - } - }, - .fs => |*fs| { - fs.executable = resolveFilesystemMaker(dependency, fs.format); - }, - .data, .binary => {}, - } - } -}; - -pub const mbr = struct { - pub const Table = struct { - bootloader: [440]u8 = .{0} ** 440, - disk_id: ?u32 = null, - partitions: [4]?*const Partition, - }; - - pub const Partition = struct { - offset: ?u64 = null, - size: u64, - - bootable: bool, - type: PartitionType, - - data: Content, - }; - - /// https://en.wikipedia.org/wiki/Partition_type - pub const PartitionType = enum(u8) { - empty = 0x00, - - fat12 = 0x01, - ntfs = 0x07, - - fat32_chs = 0x0B, - fat32_lba = 0x0C, - - fat16_lba = 0x0E, - - linux_swap = 0x82, - linux_fs = 0x83, - linux_lvm = 0x8E, - - // Output from fdisk (util-linux 2.38.1) - // 00 Leer 27 Verst. NTFS Win 82 Linux Swap / So c1 DRDOS/sec (FAT- - // 01 FAT12 39 Plan 9 83 Linux c4 DRDOS/sec (FAT- - // 02 XENIX root 3c PartitionMagic 84 versteckte OS/2 c6 DRDOS/sec (FAT- - // 03 XENIX usr 40 Venix 80286 85 Linux erweitert c7 Syrinx - // 04 FAT16 <32M 41 PPC PReP Boot 86 NTFS Datenträge da Keine Dateisyst - // 05 Erweiterte 42 SFS 87 NTFS Datenträge db CP/M / CTOS / . - // 06 FAT16 4d QNX4.x 88 Linux Klartext de Dell Dienstprog - // 07 HPFS/NTFS/exFAT 4e QNX4.x 2. Teil 8e Linux LVM df BootIt - // 08 AIX 4f QNX4.x 3. Teil 93 Amoeba e1 DOS-Zugriff - // 09 AIX bootfähig 50 OnTrack DM 94 Amoeba BBT e3 DOS R/O - // 0a OS/2-Bootmanage 51 OnTrack DM6 Aux 9f BSD/OS e4 SpeedStor - // 0b W95 FAT32 52 CP/M a0 IBM Thinkpad Ru ea Linux erweitert - // 0c W95 FAT32 (LBA) 53 OnTrack DM6 Aux a5 FreeBSD eb BeOS Dateisyste - // 0e W95 FAT16 (LBA) 54 OnTrackDM6 a6 OpenBSD ee GPT - // 0f W95 Erw. (LBA) 55 EZ-Drive a7 NeXTSTEP ef EFI (FAT-12/16/ - // 10 OPUS 56 Golden Bow a8 Darwin UFS f0 Linux/PA-RISC B - // 11 Verst. FAT12 5c Priam Edisk a9 NetBSD f1 SpeedStor - // 12 Compaq Diagnost 61 SpeedStor ab Darwin Boot f4 SpeedStor - // 14 Verst. FAT16 <3 63 GNU HURD oder S af HFS / HFS+ f2 DOS sekundär - // 16 Verst. FAT16 64 Novell Netware b7 BSDi Dateisyste f8 EBBR geschützt - // 17 Verst. HPFS/NTF 65 Novell Netware b8 BSDI Swap fb VMware VMFS - // 18 AST SmartSleep 70 DiskSecure Mult bb Boot-Assistent fc VMware VMKCORE - // 1b Verst. W95 FAT3 75 PC/IX bc Acronis FAT32 L fd Linux RAID-Auto - // 1c Verst. W95 FAT3 80 Altes Minix be Solaris Boot fe LANstep - // 1e Verst. 
W95 FAT1 81 Minix / altes L bf Solaris ff BBT - // 24 NEC DOS - - _, - }; - - pub fn encodeMbrChsEntry(lba: u32) [3]u8 { - var chs = lbaToChs(lba); - - if (chs.cylinder >= 1024) { - chs = .{ - .cylinder = 1023, - .head = 255, - .sector = 63, - }; - } - - const cyl: u10 = @intCast(chs.cylinder); - const head: u8 = @intCast(chs.head); - const sect: u6 = @intCast(chs.sector); - - const sect_cyl: u8 = @as(u8, 0xC0) & @as(u8, @truncate(cyl >> 2)) + sect; - const sect_8: u8 = @truncate(cyl); - - return .{ head, sect_cyl, sect_8 }; - } - - const CHS = struct { - cylinder: u32, - head: u8, // limit: 256 - sector: u6, // limit: 64 - - pub fn init(c: u32, h: u8, s: u6) CHS { - return .{ .cylinder = c, .head = h, .sector = s }; - } - }; - - pub fn lbaToChs(lba: u32) CHS { - const hpc = 255; - const spt = 63; - - // C, H and S are the cylinder number, the head number, and the sector number - // LBA is the logical block address - // HPC is the maximum number of heads per cylinder (reported by disk drive, typically 16 for 28-bit LBA) - // SPT is the maximum number of sectors per track (reported by disk drive, typically 63 for 28-bit LBA) - // LBA = (C * HPC + H) * SPT + (S - 1) - - const sector = (lba % spt); - const cyl_head = (lba / spt); - - const head = (cyl_head % hpc); - const cyl = (cyl_head / hpc); - - return CHS{ - .sector = @intCast(sector + 1), - .head = @intCast(head), - .cylinder = cyl, - }; - } -}; - -// test "lba to chs" { -// // table from https://en.wikipedia.org/wiki/Logical_block_addressing#CHS_conversion -// try std.testing.expectEqual(mbr.CHS.init(0, 0, 1), mbr.lbaToChs(0)); -// try std.testing.expectEqual(mbr.CHS.init(0, 0, 2), mbr.lbaToChs(1)); -// try std.testing.expectEqual(mbr.CHS.init(0, 0, 3), mbr.lbaToChs(2)); -// try std.testing.expectEqual(mbr.CHS.init(0, 0, 63), mbr.lbaToChs(62)); -// try std.testing.expectEqual(mbr.CHS.init(0, 1, 1), mbr.lbaToChs(63)); -// try std.testing.expectEqual(mbr.CHS.init(0, 15, 1), mbr.lbaToChs(945)); -// try std.testing.expectEqual(mbr.CHS.init(0, 15, 63), mbr.lbaToChs(1007)); -// try std.testing.expectEqual(mbr.CHS.init(1, 0, 1), mbr.lbaToChs(1008)); -// try std.testing.expectEqual(mbr.CHS.init(1, 0, 63), mbr.lbaToChs(1070)); -// try std.testing.expectEqual(mbr.CHS.init(1, 1, 1), mbr.lbaToChs(1071)); -// try std.testing.expectEqual(mbr.CHS.init(1, 1, 63), mbr.lbaToChs(1133)); -// try std.testing.expectEqual(mbr.CHS.init(1, 2, 1), mbr.lbaToChs(1134)); -// try std.testing.expectEqual(mbr.CHS.init(1, 15, 63), mbr.lbaToChs(2015)); -// try std.testing.expectEqual(mbr.CHS.init(2, 0, 1), mbr.lbaToChs(2016)); -// try std.testing.expectEqual(mbr.CHS.init(15, 15, 63), mbr.lbaToChs(16127)); -// try std.testing.expectEqual(mbr.CHS.init(16, 0, 1), mbr.lbaToChs(16128)); -// try std.testing.expectEqual(mbr.CHS.init(31, 15, 63), mbr.lbaToChs(32255)); -// try std.testing.expectEqual(mbr.CHS.init(32, 0, 1), mbr.lbaToChs(32256)); -// try std.testing.expectEqual(mbr.CHS.init(16319, 15, 63), mbr.lbaToChs(16450559)); -// try std.testing.expectEqual(mbr.CHS.init(16382, 15, 63), mbr.lbaToChs(16514063)); -// } - -pub const gpt = struct { - pub const Guid = [16]u8; - - pub const Table = struct { - disk_id: Guid, - - partitions: []const Partition, - }; - - pub const Partition = struct { - type: Guid, - part_id: Guid, - - offset: ?u64 = null, - size: u64, - - name: [36]u16, - - attributes: Attributes, - - data: Content, - - pub const Attributes = packed struct(u32) { - system: bool, - efi_hidden: bool, - legacy: bool, - read_only: bool, - hidden: bool, - no_automount: 
bool, - - padding: u26 = 0, - }; - }; - - /// https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs - pub const PartitionType = struct { - pub const unused: Guid = .{}; - - pub const microsoft_basic_data: Guid = .{}; - pub const microsoft_reserved: Guid = .{}; - - pub const windows_recovery: Guid = .{}; - - pub const plan9: Guid = .{}; - - pub const linux_swap: Guid = .{}; - pub const linux_fs: Guid = .{}; - pub const linux_reserved: Guid = .{}; - pub const linux_lvm: Guid = .{}; - }; - - pub fn nameLiteral(comptime name: []const u8) [36]u16 { - return comptime blk: { - var buf: [36]u16 = undefined; - const len = std.unicode.utf8ToUtf16Le(&buf, name) catch |err| @compileError(@tagName(err)); - @memset(buf[len..], 0); - break :blk &buf; - }; - } -}; - -pub const FileSystem = struct { - pub const Format = union(enum) { - pub const Tag = std.meta.Tag(@This()); - - fat12, - fat16, - fat32, - - ext2, - ext3, - ext4, - - exfat, - ntfs, - - iso_9660, - iso_13490, - udf, - - /// usage: mkfs.<tool> <image> <base> <length> <filesystem> <ops...> - /// <image> is a path to the image file - /// <base> is the byte base of the file system - /// <length> is the byte length of the file system - /// <filesystem> is the file system that should be used to format - /// <ops...> is a list of operations that should be performed on the file system: - /// - format Formats the disk image. - /// - mount Mounts the file system, must be before all following: - /// - mkdir;<dst> Creates directory <dst> and all necessary parents. - /// - file;<src>;<dst> Copy <src> to path <dst>. If <dst> exists, it will be overwritten. - /// - dir;<src>;<dst> Copy <src> recursively into <dst>. If <dst> exists, they will be merged. - /// - /// <dst> paths are always rooted, even if they don't start with a /, and always use / as a path separator. 
- /// - custom: std.Build.LazyPath, - }; - - pub const Copy = struct { - source: std.Build.LazyPath, - destination: []const u8, - }; - - pub const Item = union(enum) { - empty_dir: []const u8, - copy_dir: Copy, - copy_file: Copy, - }; - - format: Format, - label: []const u8, - items: []const Item, - - // private: - executable: ?std.Build.LazyPath = null, -}; - -pub const FileSystemBuilder = struct { - b: *std.Build, - list: std.ArrayListUnmanaged(FileSystem.Item), - - pub fn init(b: *std.Build) FileSystemBuilder { - return FileSystemBuilder{ - .b = b, - .list = .{}, - }; - } - - pub fn finalize(fsb: *FileSystemBuilder, options: struct { - format: FileSystem.Format, - label: []const u8, - }) FileSystem { - return .{ - .format = options.format, - .label = fsb.b.dupe(options.label), - .items = fsb.list.toOwnedSlice(fsb.b.allocator) catch @panic("out of memory"), - }; - } - - pub fn addFile(fsb: *FileSystemBuilder, source: std.Build.LazyPath, destination: []const u8) void { - fsb.list.append(fsb.b.allocator, .{ - .copy_file = .{ - .source = source.dupe(fsb.b), - .destination = fsb.b.dupe(destination), - }, - }) catch @panic("out of memory"); - } - - pub fn addDirectory(fsb: *FileSystemBuilder, source: std.Build.LazyPath, destination: []const u8) void { - fsb.list.append(fsb.b.allocator, .{ - .copy_dir = .{ - .source = source.dupe(fsb.b), - .destination = fsb.b.dupe(destination), - }, - }) catch @panic("out of memory"); - } - - pub fn mkdir(fsb: *FileSystemBuilder, destination: []const u8) void { - fsb.list.append(fsb.b.allocator, .{ - .empty_dir = fsb.b.dupe(destination), - }) catch @panic("out of memory"); - } -}; diff --git a/concept/cli.txt b/concept/cli.txt new file mode 100644 index 0000000..ff90c70 --- /dev/null +++ b/concept/cli.txt @@ -0,0 +1,8 @@ +dim \ + --output zig-cache/disk.img \ + --size 64M \ + --script zig-cache/script.dis \ + PATH1=vendor/syslinux-6.03/…/mbr.bin \ + PATH2=… \ + PATH3=… \ + PATH4=… \ No newline at end of file diff --git a/concept/script.dis b/concept/script.dis new file mode 100644 index 0000000..97db606 --- /dev/null +++ b/concept/script.dis @@ -0,0 +1,31 @@ +mbr-part + bootloader $PATH1 + part # partition 1 + type fat32-lba + size 500M + bootable + contents + fat fat32 + label AshetOS + add-dir ../../rootfs . + add-dir $PATH2 . 
+ apps/hello-world.ashex + copy-file $PATH3 apps/hello-gui.ashex + copy-file $PATH4 apps/clock.ashex + copy-file $PATH5 apps/paint.ashex + copy-file $PATH6 apps/init.ashex + copy-file $PATH7 apps/testing + copy-file $PATH8 apps/desktop + copy-file $PATH9 apps/testing/behaviour.ashex + copy-file $PATH10 apps/desktop/classic.ashex + copy-file $PATH11 ashet-os + copy-file ../../rootfs-x86/syslinux/modules.alias syslinux/modules.alias + copy-file ../../rootfs-x86/syslinux/pci.ids syslinux/pci.ids + copy-file ../../rootfs-x86/syslinux/syslinux.cfg syslinux/syslinux.cfg + copy-file $PATH12 syslinux/libmenu.c32 + … + endfat + endpart + ignore # partition 2 + ignore # partition 3 + ignore # partition 4 \ No newline at end of file diff --git a/src/build.old.zig b/src/build.old.zig new file mode 100644 index 0000000..bbc14d4 --- /dev/null +++ b/src/build.old.zig @@ -0,0 +1,1019 @@ +const std = @import("std"); +const builtin = @import("builtin"); + +fn root() []const u8 { + return comptime (std.fs.path.dirname(@src().file) orelse "."); +} +const build_root = root(); + +pub const KiB = 1024; +pub const MiB = 1024 * KiB; +pub const GiB = 1024 * MiB; + +fn usageDemo( + b: *std.Build, + dependency: *std.Build.Dependency, + debug_step: *std.Build.Step, +) void { + installDebugDisk(dependency, debug_step, "uninitialized.img", 50 * MiB, .uninitialized); + + installDebugDisk(dependency, debug_step, "empty-mbr.img", 50 * MiB, .{ + .mbr = .{ + .partitions = .{ + null, + null, + null, + null, + }, + }, + }); + + installDebugDisk(dependency, debug_step, "manual-offset-mbr.img", 50 * MiB, .{ + .mbr = .{ + .partitions = .{ + &.{ .offset = 2048 + 0 * 10 * MiB, .size = 10 * MiB, .bootable = true, .type = .fat32_lba, .data = .uninitialized }, + &.{ .offset = 2048 + 1 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .ntfs, .data = .uninitialized }, + &.{ .offset = 2048 + 2 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .linux_swap, .data = .uninitialized }, + &.{ .offset = 2048 + 3 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .linux_fs, .data = .uninitialized }, + }, + }, + }); + + installDebugDisk(dependency, debug_step, "auto-offset-mbr.img", 50 * MiB, .{ + .mbr = .{ + .partitions = .{ + &.{ .size = 7 * MiB, .bootable = true, .type = .fat32_lba, .data = .uninitialized }, + &.{ .size = 8 * MiB, .bootable = false, .type = .ntfs, .data = .uninitialized }, + &.{ .size = 9 * MiB, .bootable = false, .type = .linux_swap, .data = .uninitialized }, + &.{ .size = 10 * MiB, .bootable = false, .type = .linux_fs, .data = .uninitialized }, + }, + }, + }); + + installDebugDisk(dependency, debug_step, "empty-fat32.img", 50 * MiB, .{ + .fs = .{ + .format = .fat32, + .label = "EMPTY", + .items = &.{}, + }, + }); + + installDebugDisk(dependency, debug_step, "initialized-fat32.img", 50 * MiB, .{ + .fs = .{ + .format = .fat32, + .label = "ROOTFS", + .items = &.{ + .{ .empty_dir = "boot/EFI/refind/icons" }, + .{ .empty_dir = "/boot/EFI/nixos/.extra-files/" }, + .{ .empty_dir = "Users/xq/" }, + .{ .copy_dir = .{ .source = b.path("dummy/Windows"), .destination = "Windows" } }, + .{ .copy_file = .{ .source = b.path("dummy/README.md"), .destination = "Users/xq/README.md" } }, + }, + }, + }); + + installDebugDisk(dependency, debug_step, "initialized-fat32-in-mbr-partitions.img", 100 * MiB, .{ + .mbr = .{ + .partitions = .{ + &.{ + .size = 90 * MiB, + .bootable = true, + .type = .fat32_lba, + .data = .{ + .fs = .{ + .format = .fat32, + .label = "ROOTFS", + .items = &.{ + .{ .empty_dir = "boot/EFI/refind/icons" }, 
+ .{ .empty_dir = "/boot/EFI/nixos/.extra-files/" }, + .{ .empty_dir = "Users/xq/" }, + .{ .copy_dir = .{ .source = b.path("dummy/Windows"), .destination = "Windows" } }, + .{ .copy_file = .{ .source = b.path("dummy/README.md"), .destination = "Users/xq/README.md" } }, + }, + }, + }, + }, + null, + null, + null, + }, + }, + }); + + // TODO: Implement GPT partition support + // installDebugDisk(debug_step, "empty-gpt.img", 50 * MiB, .{ + // .gpt = .{ + // .partitions = &.{}, + // }, + // }); +} + +pub fn build(b: *std.Build) void { + // Steps: + + const debug_step = b.step("debug", "Builds a basic exemplary disk image."); + + // Dependency Setup: + + const zfat_dep = b.dependency("zfat", .{ + // .max_long_name_len = 121, + .code_page = .us, + .@"volume-count" = @as(u32, 1), + .@"sector-size" = @as(u32, 512), + // .rtc = .dynamic, + .mkfs = true, + .exfat = true, + }); + + const zfat_mod = zfat_dep.module("zfat"); + + const mkfs_fat = b.addExecutable(.{ + .name = "mkfs.fat", + .target = b.graph.host, + .optimize = .ReleaseSafe, + .root_source_file = b.path("src/mkfs.fat.zig"), + }); + mkfs_fat.root_module.addImport("fat", zfat_mod); + mkfs_fat.linkLibC(); + b.installArtifact(mkfs_fat); + + // Usage: + var self_dep: std.Build.Dependency = .{ + .builder = b, + }; + usageDemo(b, &self_dep, debug_step); +} + +fn resolveFilesystemMaker(dependency: *std.Build.Dependency, fs: FileSystem.Format) std.Build.LazyPath { + return switch (fs) { + .fat12, .fat16, .fat32, .exfat => dependency.artifact("mkfs.fat").getEmittedBin(), + + .custom => |path| path, + + else => std.debug.panic("Unsupported builtin file system: {s}", .{@tagName(fs)}), + }; +} + +fn relpath(b: *std.Build, path: []const u8) std.Build.LazyPath { + return .{ + .cwd_relative = b.pathFromRoot(path), + }; +} + +fn installDebugDisk( + dependency: *std.Build.Dependency, + install_step: *std.Build.Step, + name: []const u8, + size: u64, + content: Content, +) void { + const initialize_disk = initializeDisk(dependency, size, content); + const install_disk = install_step.owner.addInstallFile(initialize_disk.getImageFile(), name); + install_step.dependOn(&install_disk.step); +} + +pub fn initializeDisk(dependency: *std.Build.Dependency, size: u64, content: Content) *InitializeDiskStep { + const ids = dependency.builder.allocator.create(InitializeDiskStep) catch @panic("out of memory"); + + ids.* = .{ + .step = std.Build.Step.init(.{ + .owner = dependency.builder, // TODO: Is this correct? 
+ .id = .custom, + .name = "initialize disk", + .makeFn = InitializeDiskStep.make, + .first_ret_addr = @returnAddress(), + .max_rss = 0, + }), + .disk_file = .{ .step = &ids.step }, + .content = content.dupe(dependency.builder) catch @panic("out of memory"), + .size = size, + }; + + ids.content.resolveFileSystems(dependency); + + ids.content.pushDependenciesTo(&ids.step); + + return ids; +} + +pub const InitializeDiskStep = struct { + const IoPump = std.fifo.LinearFifo(u8, .{ .Static = 8192 }); + + step: std.Build.Step, + + content: Content, + size: u64, + + disk_file: std.Build.GeneratedFile, + + pub fn getImageFile(ids: *InitializeDiskStep) std.Build.LazyPath { + return .{ .generated = .{ + .file = &ids.disk_file, + } }; + } + + fn addDirectoryToCache(b: *std.Build, manifest: *std.Build.Cache.Manifest, parent: std.fs.Dir, path: []const u8) !void { + var dir = try parent.openDir(path, .{ .iterate = true }); + defer dir.close(); + + var walker = try dir.walk(b.allocator); + defer walker.deinit(); + + while (try walker.next()) |entry| { + switch (entry.kind) { + .file => { + const abs_path = try entry.dir.realpathAlloc(b.allocator, entry.basename); + defer b.allocator.free(abs_path); + _ = try manifest.addFile(abs_path, null); + }, + .directory => try addDirectoryToCache(b, manifest, entry.dir, entry.basename), + + else => return error.Unsupported, + } + } + } + + fn addToCacheManifest(b: *std.Build, asking: *std.Build.Step, manifest: *std.Build.Cache.Manifest, content: Content) !void { + manifest.hash.addBytes(@tagName(content)); + switch (content) { + .uninitialized => {}, + + .mbr => |table| { // MbrTable + manifest.hash.addBytes(&table.bootloader); + for (table.partitions) |part_or_null| { + const part = part_or_null orelse { + manifest.hash.addBytes("none"); + break; + }; + manifest.hash.add(part.bootable); + manifest.hash.add(part.offset orelse 0x04_03_02_01); + manifest.hash.add(part.size); + manifest.hash.add(part.type); + try addToCacheManifest(b, asking, manifest, part.data); + } + }, + + .gpt => |table| { // GptTable + manifest.hash.addBytes(&table.disk_id); + + for (table.partitions) |part| { + manifest.hash.addBytes(&part.part_id); + manifest.hash.addBytes(&part.type); + manifest.hash.addBytes(std.mem.sliceAsBytes(&part.name)); + + manifest.hash.add(part.offset orelse 0x04_03_02_01); + manifest.hash.add(part.size); + + manifest.hash.add(@as(u32, @bitCast(part.attributes))); + + try addToCacheManifest(b, asking, manifest, part.data); + } + }, + + .fs => |fs| { // FileSystem + manifest.hash.add(@as(u64, fs.items.len)); + manifest.hash.addBytes(@tagName(fs.format)); + manifest.hash.addBytes(fs.executable.?.getPath2(b, asking)); + + // TODO: Properly add internal file system + for (fs.items) |entry| { + manifest.hash.addBytes(@tagName(entry)); + switch (entry) { + .empty_dir => |dir| { + manifest.hash.addBytes(dir); + }, + .copy_dir => |dir| { + manifest.hash.addBytes(dir.destination); + try addDirectoryToCache(b, manifest, std.fs.cwd(), dir.source.getPath2(b, asking)); + }, + .copy_file => |file| { + manifest.hash.addBytes(file.destination); + _ = try manifest.addFile(file.source.getPath2(b, asking), null); + }, + } + } + }, + .data => |data| { + const path = data.getPath2(b, asking); + _ = try manifest.addFile(path, null); + }, + .binary => |binary| { + const path = binary.getEmittedBin().getPath2(b, asking); + _ = try manifest.addFile(path, null); + }, + } + } + + const HumanContext = std.BoundedArray(u8, 256); + + const DiskImage = struct { + path: []const u8, + handle: 
*std.fs.File, + }; + + fn writeDiskImage(b: *std.Build, asking: *std.Build.Step, disk: DiskImage, base: u64, length: u64, content: Content, context: *HumanContext) !void { + try disk.handle.seekTo(base); + + const context_len = context.len; + defer context.len = context_len; + + context.appendSliceAssumeCapacity("."); + context.appendSliceAssumeCapacity(@tagName(content)); + + switch (content) { + .uninitialized => {}, + + .mbr => |table| { // MbrTable + { + var boot_sector: [512]u8 = .{0} ** 512; + + @memcpy(boot_sector[0..table.bootloader.len], &table.bootloader); + + std.mem.writeInt(u32, boot_sector[0x1B8..0x1BC], if (table.disk_id) |disk_id| disk_id else 0x0000_0000, .little); + std.mem.writeInt(u16, boot_sector[0x1BC..0x1BE], 0x0000, .little); + + var all_auto = true; + var all_manual = true; + for (table.partitions) |part_or_null| { + const part = part_or_null orelse continue; + + if (part.offset != null) { + all_auto = false; + } else { + all_manual = false; + } + } + + if (!all_auto and !all_manual) { + std.log.err("{s}: not all partitions have an explicit offset!", .{context.slice()}); + return error.InvalidSectorBoundary; + } + + const part_base = 0x01BE; + var auto_offset: u64 = 2048; + for (table.partitions, 0..) |part_or_null, part_id| { + const reset_len = context.len; + defer context.len = reset_len; + + var buffer: [64]u8 = undefined; + context.appendSliceAssumeCapacity(std.fmt.bufPrint(&buffer, "[{}]", .{part_id}) catch unreachable); + + const desc = boot_sector[part_base + 16 * part_id ..][0..16]; + + if (part_or_null) |part| { + // https://wiki.osdev.org/MBR#Partition_table_entry_format + + const part_offset = part.offset orelse auto_offset; + + if ((part_offset % 512) != 0) { + std.log.err("{s}: .offset is not divisible by 512!", .{context.slice()}); + return error.InvalidSectorBoundary; + } + if ((part.size % 512) != 0) { + std.log.err("{s}: .size is not divisible by 512!", .{context.slice()}); + return error.InvalidSectorBoundary; + } + + const lba_u64 = @divExact(part_offset, 512); + const size_u64 = @divExact(part.size, 512); + + const lba = std.math.cast(u32, lba_u64) orelse { + std.log.err("{s}: .offset is out of bounds!", .{context.slice()}); + return error.InvalidSectorBoundary; + }; + const size = std.math.cast(u32, size_u64) orelse { + std.log.err("{s}: .size is out of bounds!", .{context.slice()}); + return error.InvalidSectorBoundary; + }; + + desc[0] = if (part.bootable) 0x80 else 0x00; + + desc[1..4].* = mbr.encodeMbrChsEntry(lba); // chs_start + desc[4] = @intFromEnum(part.type); + desc[5..8].* = mbr.encodeMbrChsEntry(lba + size - 1); // chs_end + std.mem.writeInt(u32, desc[8..12], lba, .little); // lba_start + std.mem.writeInt(u32, desc[12..16], size, .little); // block_count + + auto_offset += part.size; + } else { + @memset(desc, 0); // inactive + } + } + boot_sector[0x01FE] = 0x55; + boot_sector[0x01FF] = 0xAA; + + try disk.handle.writeAll(&boot_sector); + } + + { + var auto_offset: u64 = 2048; + for (table.partitions, 0..) 
|part_or_null, part_id| { + const part = part_or_null orelse continue; + + const reset_len = context.len; + defer context.len = reset_len; + + var buffer: [64]u8 = undefined; + context.appendSliceAssumeCapacity(std.fmt.bufPrint(&buffer, "[{}]", .{part_id}) catch unreachable); + + try writeDiskImage(b, asking, disk, base + auto_offset, part.size, part.data, context); + + auto_offset += part.size; + } + } + }, + + .gpt => |table| { // GptTable + _ = table; + std.log.err("{s}: GPT partition tables not supported yet!", .{context.slice()}); + return error.GptUnsupported; + }, + + .fs => |fs| { + const maker_exe = fs.executable.?.getPath2(b, asking); + + try disk.handle.sync(); + + // const disk_image_path = switch (builtin.os.tag) { + // .linux => blk: { + // const self_pid = std.os.linux.getpid(); + // break :blk b.fmt("/proc/{}/fd/{}", .{ self_pid, disk.handle }); + // }, + + // else => @compileError("TODO: Support this on other OS as well!"), + // }; + + var argv = std.ArrayList([]const u8).init(b.allocator); + defer argv.deinit(); + + try argv.appendSlice(&.{ + maker_exe, // exe + disk.path, // image file + b.fmt("0x{X:0>8}", .{base}), // filesystem offset (bytes) + b.fmt("0x{X:0>8}", .{length}), // filesystem length (bytes) + @tagName(fs.format), // filesystem type + "format", // cmd 1: format the disk + "mount", // cmd 2: mount it internally + }); + + for (fs.items) |item| { + switch (item) { + .empty_dir => |dir| { + try argv.append(b.fmt("mkdir;{s}", .{dir})); + }, + .copy_dir => |src_dst| { + try argv.append(b.fmt("dir;{s};{s}", .{ + src_dst.source.getPath2(b, asking), + src_dst.destination, + })); + }, + .copy_file => |src_dst| { + try argv.append(b.fmt("file;{s};{s}", .{ + src_dst.source.getPath2(b, asking), + src_dst.destination, + })); + }, + } + } + + // use shared access to the file: + const stdout = b.run(argv.items); + + try disk.handle.sync(); + + _ = stdout; + }, + + .data => |data| { + const path = data.getPath2(b, asking); + try copyFileToImage(disk, length, std.fs.cwd(), path, context.slice()); + }, + + .binary => |binary| { + const path = binary.getEmittedBin().getPath2(b, asking); + try copyFileToImage(disk, length, std.fs.cwd(), path, context.slice()); + }, + } + } + + fn copyFileToImage(disk: DiskImage, max_length: u64, dir: std.fs.Dir, path: []const u8, context: []const u8) !void { + errdefer std.log.err("{s}: failed to copy data to image.", .{context}); + + var file = try dir.openFile(path, .{}); + defer file.close(); + + const stat = try file.stat(); + if (stat.size > max_length) { + var realpath_buffer: [std.fs.max_path_bytes]u8 = undefined; + std.log.err("{s}: The file '{!s}' exceeds the size of the container. 
The file is {:.2} large, while the container only allows for {:.2}.", .{ + context, + dir.realpath(path, &realpath_buffer), + std.fmt.fmtIntSizeBin(stat.size), + std.fmt.fmtIntSizeBin(max_length), + }); + return error.FileTooLarge; + } + + var pumper = IoPump.init(); + + try pumper.pump(file.reader(), disk.handle.writer()); + + const padding = max_length - stat.size; + if (padding > 0) { + try disk.handle.writer().writeByteNTimes(' ', padding); + } + } + + fn make(step: *std.Build.Step, options: std.Build.Step.MakeOptions) !void { + const b = step.owner; + _ = options; + + const ids: *InitializeDiskStep = @fieldParentPtr("step", step); + + var man = b.graph.cache.obtain(); + defer man.deinit(); + + man.hash.addBytes(&.{ 232, 8, 75, 249, 2, 210, 51, 118, 171, 12 }); // Change when impl changes + + try addToCacheManifest(b, step, &man, ids.content); + + step.result_cached = try step.cacheHit(&man); + const digest = man.final(); + + const output_components = .{ "o", &digest, "disk.img" }; + const output_sub_path = b.pathJoin(&output_components); + const output_sub_dir_path = std.fs.path.dirname(output_sub_path).?; + b.cache_root.handle.makePath(output_sub_dir_path) catch |err| { + return step.fail("unable to make path '{}{s}': {s}", .{ + b.cache_root, output_sub_dir_path, @errorName(err), + }); + }; + + ids.disk_file.path = try b.cache_root.join(b.allocator, &output_components); + + if (step.result_cached) + return; + + { + const disk_path = ids.disk_file.path.?; + + var disk = try std.fs.cwd().createFile(disk_path, .{}); + defer disk.close(); + + try disk.seekTo(ids.size - 1); + try disk.writeAll("\x00"); + try disk.seekTo(0); + + var context: HumanContext = .{}; + context.appendSliceAssumeCapacity("disk"); + + const disk_image = DiskImage{ + .path = disk_path, + .handle = &disk, + }; + + try writeDiskImage(b, step, disk_image, 0, ids.size, ids.content, &context); + } + + // if (!step.result_cached) + try step.writeManifest(&man); + } +}; + +pub const Content = union(enum) { + uninitialized, + + mbr: mbr.Table, + gpt: gpt.Table, + + fs: FileSystem, + + data: std.Build.LazyPath, + + binary: *std.Build.Step.Compile, + + pub fn dupe(content: Content, b: *std.Build) !Content { + const allocator = b.allocator; + + switch (content) { + .uninitialized => return content, + .mbr => |table| { + var copy = table; + for (©.partitions) |*part| { + if (part.*) |*p| { + const buf = try b.allocator.create(mbr.Partition); + buf.* = p.*.*; + buf.data = try buf.data.dupe(b); + p.* = buf; + } + } + return .{ .mbr = copy }; + }, + .gpt => |table| { + var copy = table; + const partitions = try allocator.dupe(gpt.Partition, table.partitions); + for (partitions) |*part| { + part.data = try part.data.dupe(b); + } + copy.partitions = partitions; + return .{ .gpt = copy }; + }, + .fs => |fs| { + var copy = fs; + + copy.label = try allocator.dupe(u8, fs.label); + const items = try allocator.dupe(FileSystem.Item, fs.items); + for (items) |*item| { + switch (item.*) { + .empty_dir => |*dir| { + dir.* = try allocator.dupe(u8, dir.*); + }, + .copy_dir, .copy_file => |*cp| { + const cp_new: FileSystem.Copy = .{ + .destination = try allocator.dupe(u8, cp.destination), + .source = cp.source.dupe(b), + }; + cp.* = cp_new; + }, + } + } + copy.items = items; + + switch (copy.format) { + .custom => |*path| path.* = path.dupe(b), + else => {}, + } + + return .{ .fs = copy }; + }, + .data => |data| { + return .{ .data = data.dupe(b) }; + }, + .binary => |binary| { + return .{ .binary = binary }; + }, + } + } + + pub fn 
pushDependenciesTo(content: Content, step: *std.Build.Step) void { + switch (content) { + .uninitialized => {}, + .mbr => |table| { + for (table.partitions) |part| { + if (part) |p| { + p.data.pushDependenciesTo(step); + } + } + }, + .gpt => |table| { + for (table.partitions) |part| { + part.data.pushDependenciesTo(step); + } + }, + .fs => |fs| { + for (fs.items) |item| { + switch (item) { + .empty_dir => {}, + .copy_dir, .copy_file => |*cp| { + cp.source.addStepDependencies(step); + }, + } + } + if (fs.format == .custom) { + fs.format.custom.addStepDependencies(step); + } + fs.executable.?.addStepDependencies(step); // Must be resolved already, invoke resolveFileSystems before! + }, + .data => |data| data.addStepDependencies(step), + .binary => |binary| step.dependOn(&binary.step), + } + } + + pub fn resolveFileSystems(content: *Content, dependency: *std.Build.Dependency) void { + switch (content.*) { + .uninitialized => {}, + .mbr => |*table| { + for (&table.partitions) |*part| { + if (part.*) |p| { + @constCast(&p.data).resolveFileSystems(dependency); + } + } + }, + .gpt => |*table| { + for (table.partitions) |*part| { + @constCast(&part.data).resolveFileSystems(dependency); + } + }, + .fs => |*fs| { + fs.executable = resolveFilesystemMaker(dependency, fs.format); + }, + .data, .binary => {}, + } + } +}; + +pub const mbr = struct { + pub const Table = struct { + bootloader: [440]u8 = .{0} ** 440, + disk_id: ?u32 = null, + partitions: [4]?*const Partition, + }; + + pub const Partition = struct { + offset: ?u64 = null, + size: u64, + + bootable: bool, + type: PartitionType, + + data: Content, + }; + + /// https://en.wikipedia.org/wiki/Partition_type + pub const PartitionType = enum(u8) { + empty = 0x00, + + fat12 = 0x01, + ntfs = 0x07, + + fat32_chs = 0x0B, + fat32_lba = 0x0C, + + fat16_lba = 0x0E, + + linux_swap = 0x82, + linux_fs = 0x83, + linux_lvm = 0x8E, + + // Output from fdisk (util-linux 2.38.1) + // 00 Leer 27 Verst. NTFS Win 82 Linux Swap / So c1 DRDOS/sec (FAT- + // 01 FAT12 39 Plan 9 83 Linux c4 DRDOS/sec (FAT- + // 02 XENIX root 3c PartitionMagic 84 versteckte OS/2 c6 DRDOS/sec (FAT- + // 03 XENIX usr 40 Venix 80286 85 Linux erweitert c7 Syrinx + // 04 FAT16 <32M 41 PPC PReP Boot 86 NTFS Datenträge da Keine Dateisyst + // 05 Erweiterte 42 SFS 87 NTFS Datenträge db CP/M / CTOS / . + // 06 FAT16 4d QNX4.x 88 Linux Klartext de Dell Dienstprog + // 07 HPFS/NTFS/exFAT 4e QNX4.x 2. Teil 8e Linux LVM df BootIt + // 08 AIX 4f QNX4.x 3. Teil 93 Amoeba e1 DOS-Zugriff + // 09 AIX bootfähig 50 OnTrack DM 94 Amoeba BBT e3 DOS R/O + // 0a OS/2-Bootmanage 51 OnTrack DM6 Aux 9f BSD/OS e4 SpeedStor + // 0b W95 FAT32 52 CP/M a0 IBM Thinkpad Ru ea Linux erweitert + // 0c W95 FAT32 (LBA) 53 OnTrack DM6 Aux a5 FreeBSD eb BeOS Dateisyste + // 0e W95 FAT16 (LBA) 54 OnTrackDM6 a6 OpenBSD ee GPT + // 0f W95 Erw. (LBA) 55 EZ-Drive a7 NeXTSTEP ef EFI (FAT-12/16/ + // 10 OPUS 56 Golden Bow a8 Darwin UFS f0 Linux/PA-RISC B + // 11 Verst. FAT12 5c Priam Edisk a9 NetBSD f1 SpeedStor + // 12 Compaq Diagnost 61 SpeedStor ab Darwin Boot f4 SpeedStor + // 14 Verst. FAT16 <3 63 GNU HURD oder S af HFS / HFS+ f2 DOS sekundär + // 16 Verst. FAT16 64 Novell Netware b7 BSDi Dateisyste f8 EBBR geschützt + // 17 Verst. HPFS/NTF 65 Novell Netware b8 BSDI Swap fb VMware VMFS + // 18 AST SmartSleep 70 DiskSecure Mult bb Boot-Assistent fc VMware VMKCORE + // 1b Verst. W95 FAT3 75 PC/IX bc Acronis FAT32 L fd Linux RAID-Auto + // 1c Verst. W95 FAT3 80 Altes Minix be Solaris Boot fe LANstep + // 1e Verst. 
W95 FAT1 81 Minix / altes L bf Solaris ff BBT + // 24 NEC DOS + + _, + }; + + pub fn encodeMbrChsEntry(lba: u32) [3]u8 { + var chs = lbaToChs(lba); + + if (chs.cylinder >= 1024) { + chs = .{ + .cylinder = 1023, + .head = 255, + .sector = 63, + }; + } + + const cyl: u10 = @intCast(chs.cylinder); + const head: u8 = @intCast(chs.head); + const sect: u6 = @intCast(chs.sector); + + const sect_cyl: u8 = @as(u8, 0xC0) & @as(u8, @truncate(cyl >> 2)) + sect; + const sect_8: u8 = @truncate(cyl); + + return .{ head, sect_cyl, sect_8 }; + } + + const CHS = struct { + cylinder: u32, + head: u8, // limit: 256 + sector: u6, // limit: 64 + + pub fn init(c: u32, h: u8, s: u6) CHS { + return .{ .cylinder = c, .head = h, .sector = s }; + } + }; + + pub fn lbaToChs(lba: u32) CHS { + const hpc = 255; + const spt = 63; + + // C, H and S are the cylinder number, the head number, and the sector number + // LBA is the logical block address + // HPC is the maximum number of heads per cylinder (reported by disk drive, typically 16 for 28-bit LBA) + // SPT is the maximum number of sectors per track (reported by disk drive, typically 63 for 28-bit LBA) + // LBA = (C * HPC + H) * SPT + (S - 1) + + const sector = (lba % spt); + const cyl_head = (lba / spt); + + const head = (cyl_head % hpc); + const cyl = (cyl_head / hpc); + + return CHS{ + .sector = @intCast(sector + 1), + .head = @intCast(head), + .cylinder = cyl, + }; + } +}; + +// test "lba to chs" { +// // table from https://en.wikipedia.org/wiki/Logical_block_addressing#CHS_conversion +// try std.testing.expectEqual(mbr.CHS.init(0, 0, 1), mbr.lbaToChs(0)); +// try std.testing.expectEqual(mbr.CHS.init(0, 0, 2), mbr.lbaToChs(1)); +// try std.testing.expectEqual(mbr.CHS.init(0, 0, 3), mbr.lbaToChs(2)); +// try std.testing.expectEqual(mbr.CHS.init(0, 0, 63), mbr.lbaToChs(62)); +// try std.testing.expectEqual(mbr.CHS.init(0, 1, 1), mbr.lbaToChs(63)); +// try std.testing.expectEqual(mbr.CHS.init(0, 15, 1), mbr.lbaToChs(945)); +// try std.testing.expectEqual(mbr.CHS.init(0, 15, 63), mbr.lbaToChs(1007)); +// try std.testing.expectEqual(mbr.CHS.init(1, 0, 1), mbr.lbaToChs(1008)); +// try std.testing.expectEqual(mbr.CHS.init(1, 0, 63), mbr.lbaToChs(1070)); +// try std.testing.expectEqual(mbr.CHS.init(1, 1, 1), mbr.lbaToChs(1071)); +// try std.testing.expectEqual(mbr.CHS.init(1, 1, 63), mbr.lbaToChs(1133)); +// try std.testing.expectEqual(mbr.CHS.init(1, 2, 1), mbr.lbaToChs(1134)); +// try std.testing.expectEqual(mbr.CHS.init(1, 15, 63), mbr.lbaToChs(2015)); +// try std.testing.expectEqual(mbr.CHS.init(2, 0, 1), mbr.lbaToChs(2016)); +// try std.testing.expectEqual(mbr.CHS.init(15, 15, 63), mbr.lbaToChs(16127)); +// try std.testing.expectEqual(mbr.CHS.init(16, 0, 1), mbr.lbaToChs(16128)); +// try std.testing.expectEqual(mbr.CHS.init(31, 15, 63), mbr.lbaToChs(32255)); +// try std.testing.expectEqual(mbr.CHS.init(32, 0, 1), mbr.lbaToChs(32256)); +// try std.testing.expectEqual(mbr.CHS.init(16319, 15, 63), mbr.lbaToChs(16450559)); +// try std.testing.expectEqual(mbr.CHS.init(16382, 15, 63), mbr.lbaToChs(16514063)); +// } + +pub const gpt = struct { + pub const Guid = [16]u8; + + pub const Table = struct { + disk_id: Guid, + + partitions: []const Partition, + }; + + pub const Partition = struct { + type: Guid, + part_id: Guid, + + offset: ?u64 = null, + size: u64, + + name: [36]u16, + + attributes: Attributes, + + data: Content, + + pub const Attributes = packed struct(u32) { + system: bool, + efi_hidden: bool, + legacy: bool, + read_only: bool, + hidden: bool, + no_automount: 
bool, + + padding: u26 = 0, + }; + }; + + /// https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs + pub const PartitionType = struct { + pub const unused: Guid = .{}; + + pub const microsoft_basic_data: Guid = .{}; + pub const microsoft_reserved: Guid = .{}; + + pub const windows_recovery: Guid = .{}; + + pub const plan9: Guid = .{}; + + pub const linux_swap: Guid = .{}; + pub const linux_fs: Guid = .{}; + pub const linux_reserved: Guid = .{}; + pub const linux_lvm: Guid = .{}; + }; + + pub fn nameLiteral(comptime name: []const u8) [36]u16 { + return comptime blk: { + var buf: [36]u16 = undefined; + const len = std.unicode.utf8ToUtf16Le(&buf, name) catch |err| @compileError(@tagName(err)); + @memset(buf[len..], 0); + break :blk &buf; + }; + } +}; + +pub const FileSystem = struct { + pub const Format = union(enum) { + pub const Tag = std.meta.Tag(@This()); + + fat12, + fat16, + fat32, + + ext2, + ext3, + ext4, + + exfat, + ntfs, + + iso_9660, + iso_13490, + udf, + + /// usage: mkfs.<tool> <image> <base> <length> <filesystem> <ops...> + /// <image> is a path to the image file + /// <base> is the byte base of the file system + /// <length> is the byte length of the file system + /// <filesystem> is the file system that should be used to format + /// <ops...> is a list of operations that should be performed on the file system: + /// - format Formats the disk image. + /// - mount Mounts the file system, must be before all following: + /// - mkdir;<dst> Creates directory <dst> and all necessary parents. + /// - file;<src>;<dst> Copy <src> to path <dst>. If <dst> exists, it will be overwritten. + /// - dir;<src>;<dst> Copy <src> recursively into <dst>. If <dst> exists, they will be merged. + /// + /// <dst> paths are always rooted, even if they don't start with a /, and always use / as a path separator. 
+ /// + custom: std.Build.LazyPath, + }; + + pub const Copy = struct { + source: std.Build.LazyPath, + destination: []const u8, + }; + + pub const Item = union(enum) { + empty_dir: []const u8, + copy_dir: Copy, + copy_file: Copy, + }; + + format: Format, + label: []const u8, + items: []const Item, + + // private: + executable: ?std.Build.LazyPath = null, +}; + +pub const FileSystemBuilder = struct { + b: *std.Build, + list: std.ArrayListUnmanaged(FileSystem.Item), + + pub fn init(b: *std.Build) FileSystemBuilder { + return FileSystemBuilder{ + .b = b, + .list = .{}, + }; + } + + pub fn finalize(fsb: *FileSystemBuilder, options: struct { + format: FileSystem.Format, + label: []const u8, + }) FileSystem { + return .{ + .format = options.format, + .label = fsb.b.dupe(options.label), + .items = fsb.list.toOwnedSlice(fsb.b.allocator) catch @panic("out of memory"), + }; + } + + pub fn addFile(fsb: *FileSystemBuilder, source: std.Build.LazyPath, destination: []const u8) void { + fsb.list.append(fsb.b.allocator, .{ + .copy_file = .{ + .source = source.dupe(fsb.b), + .destination = fsb.b.dupe(destination), + }, + }) catch @panic("out of memory"); + } + + pub fn addDirectory(fsb: *FileSystemBuilder, source: std.Build.LazyPath, destination: []const u8) void { + fsb.list.append(fsb.b.allocator, .{ + .copy_dir = .{ + .source = source.dupe(fsb.b), + .destination = fsb.b.dupe(destination), + }, + }) catch @panic("out of memory"); + } + + pub fn mkdir(fsb: *FileSystemBuilder, destination: []const u8) void { + fsb.list.append(fsb.b.allocator, .{ + .empty_dir = fsb.b.dupe(destination), + }) catch @panic("out of memory"); + } +}; diff --git a/src/dim.zig b/src/dim.zig new file mode 100644 index 0000000..318cf6f --- /dev/null +++ b/src/dim.zig @@ -0,0 +1,16 @@ +//! +//! Disk Imager Command Line +//! 
+const std = @import("std"); + +const Tokenizer = @import("tokenizer.zig"); + +pub fn main() !void { + + // + +} + +test { + _ = Tokenizer; +} diff --git a/src/tokenizer.zig b/src/tokenizer.zig new file mode 100644 index 0000000..3a02b4f --- /dev/null +++ b/src/tokenizer.zig @@ -0,0 +1,153 @@ +const std = @import("std"); + +const Tokenizer = @This(); + +pub const TokenType = enum { + /// `\S+` + word, + + /// `\$\w+` + variable, + + /// `\s+` + whitespace, + + /// `/#[^\n]*\n/` + comment, + + /// `/"([^"]|\\")*"/` + string, +}; + +pub const Token = struct { + offset: u32, + len: u32, + type: TokenType, +}; + +source: []const u8, +index: usize = 0, + +pub fn init(source: []const u8) Tokenizer { + return .{ .source = source }; +} + +pub const Error = error{ + SourceInputTooLarge, +}; + +pub fn next(tk: *Tokenizer) Error!?Token { + if (tk.index >= tk.source.len) + return null; + if (tk.index >= std.math.maxInt(u32)) { + return error.SourceInputTooLarge; + } + + const start = tk.index; + const first = tk.source[start]; + + if (std.ascii.isWhitespace(first)) { + while (tk.index < tk.source.len and std.ascii.isWhitespace(tk.source[tk.index])) { + tk.index += 1; + } + return .{ + .offset = @intCast(start), + .len = @intCast(tk.index - start), + .type = .whitespace, + }; + } + if (first == '#') { + while (tk.index < tk.source.len and tk.source[tk.index] != '\n') { + tk.index += 1; + } + return .{ + .offset = @intCast(start), + .len = @intCast(tk.index - start), + .type = .comment, + }; + } + + if (first == '"') { + tk.index += 1; + + while (tk.index < tk.source.len) { + const chr = tk.source[tk.index]; + tk.index += 1; + + if (chr == '"') + break; + + if (chr == '\\') + tk.index += 1; + } + + return .{ + .offset = @intCast(start), + .len = @intCast(tk.index - start), + .type = .string, + }; + } + + var ttype: TokenType = .word; + if (first == '$') { + tk.index += 1; + ttype = .variable; + } + while (tk.index < tk.source.len and !std.ascii.isWhitespace(tk.source[tk.index])) { + tk.index += 1; + } + return .{ + .offset = @intCast(start), + .len = @intCast(tk.index - start), + .type = ttype, + }; +} + +fn run_fuzz_test(_: void, input: []const u8) !void { + var tokenizer = init(input); + + while (try tokenizer.next()) |_| {} +} + +test "fuzz Tokenizer" { + try std.testing.fuzz({}, run_fuzz_test, .{}); +} + +test Tokenizer { + const seq: []const struct { TokenType, []const u8 } = &.{ + .{ .word, "hello" }, + .{ .whitespace, " " }, + .{ .word, "world" }, + .{ .whitespace, "\n " }, + .{ .variable, "$foobar" }, + .{ .whitespace, " " }, + .{ .comment, "# hello, this is a comment" }, + .{ .whitespace, "\n" }, + .{ .string, "\"stringy content\"" }, + }; + + var tokenizer = init( + \\hello world + \\ $foobar # hello, this is a comment + \\"stringy content" + ); + + var offset: u32 = 0; + for (seq) |expected| { + const actual = (try tokenizer.next()) orelse return error.Unexpected; + errdefer std.debug.print("unexpected token: .{} \"{}\"\n", .{ + std.zig.fmtId(@tagName(actual.type)), + std.zig.fmtEscapes(tokenizer.source[actual.offset..][0..actual.len]), + }); + try std.testing.expectEqual(offset, actual.offset); + try std.testing.expectEqual(expected.@"0", actual.type); + try std.testing.expectEqual(expected.@"1".len, actual.len); + offset += actual.len; + } + try std.testing.expectEqual(null, try tokenizer.next()); +} + +test "empty file" { + var tokenizer = init(""); + try std.testing.expectEqual(null, try tokenizer.next()); +} From ecb7e37458b8f1eb3b64ea67bf09130dd6c1e40f Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Felix=20Quei=C3=9Fner?= <git@random-projects.net> Date: Tue, 4 Mar 2025 22:09:31 +0100 Subject: [PATCH 02/26] Adds basic parser with include file support and variable substitution --- build.zig.zon | 2 +- concept/script.dis | 2 +- src/Parser.zig | 418 +++++++++++++++++++++++++++ src/{tokenizer.zig => Tokenizer.zig} | 96 ++++-- src/dim.zig | 4 +- 5 files changed, 497 insertions(+), 25 deletions(-) create mode 100644 src/Parser.zig rename src/{tokenizer.zig => Tokenizer.zig} (61%) diff --git a/build.zig.zon b/build.zig.zon index 6d4a4c9..890522b 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -1,6 +1,6 @@ .{ .name = .disk_image_step, - .version = "0.1.0", + .version = "2.0.0", .fingerprint = 0xdaabde74a06664f7, .dependencies = .{ .zfat = .{ diff --git a/concept/script.dis b/concept/script.dis index 97db606..a847fca 100644 --- a/concept/script.dis +++ b/concept/script.dis @@ -9,7 +9,7 @@ mbr-part label AshetOS add-dir ../../rootfs . add-dir $PATH2 . - apps/hello-world.ashex + copy-file $PATH3 apps/hello-world.ashex copy-file $PATH3 apps/hello-gui.ashex copy-file $PATH4 apps/clock.ashex copy-file $PATH5 apps/paint.ashex diff --git a/src/Parser.zig b/src/Parser.zig new file mode 100644 index 0000000..21babe6 --- /dev/null +++ b/src/Parser.zig @@ -0,0 +1,418 @@ +const std = @import("std"); + +const Tokenizer = @import("Tokenizer.zig"); + +const Token = Tokenizer.Token; +const TokenType = Tokenizer.TokenType; + +const Parser = @This(); + +pub const Error = Tokenizer.Error || error{ + FileNotFound, + UnknownVariable, + IoError, + BadDirective, + MaxIncludeDepthReached, + ExpectedIncludePath, + UnknownDirective, + OutOfMemory, +}; + +pub const IO = struct { + fetch_file_fn: *const fn (io: *const IO, std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8, + resolve_variable_fn: *const fn (io: *const IO, name: []const u8) error{UnknownVariable}![]const u8, + + pub fn fetch_file(io: *const IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8 { + return io.fetch_file_fn(io, allocator, path); + } + + pub fn resolve_variable(io: *const IO, name: []const u8) error{UnknownVariable}![]const u8 { + return io.resolve_variable_fn(io, name); + } +}; + +const File = struct { + path: []const u8, + tokenizer: Tokenizer, + free: bool, +}; + +allocator: std.mem.Allocator, +io: IO, + +file_stack: []File, +max_include_depth: usize, + +pub const InitOptions = struct { + max_include_depth: usize, +}; +pub fn init(allocator: std.mem.Allocator, io: IO, options: InitOptions) error{OutOfMemory}!Parser { + var slice = try allocator.alloc(File, options.max_include_depth); + slice.len = 0; + return .{ + .allocator = allocator, + .io = io, + .max_include_depth = options.max_include_depth, + .file_stack = slice, + }; +} + +pub fn deinit(parser: *Parser) void { + for (parser.file_stack) |file| { + if (file.free) { + parser.allocator.free(file.path); + parser.allocator.free(file.tokenizer.source); + } + } + parser.file_stack.len = parser.max_include_depth; + parser.allocator.free(parser.file_stack); + parser.* = undefined; +} + +pub fn push_source(parser: *Parser, options: struct { + path: []const u8, + contents: []const u8, +}) !void { + std.debug.assert(parser.file_stack.len <= parser.max_include_depth); + if (parser.file_stack.len == parser.max_include_depth) + return error.MaxIncludeDepthReached; + + const index = parser.file_stack.len; + parser.file_stack.len += 1; + + parser.file_stack[index] = .{ + .path = options.path, + 
.tokenizer = .init(options.contents), + .free = false, + }; +} + +pub fn push_file(parser: *Parser, include_path: []const u8) !void { + std.debug.assert(parser.file_stack.len <= parser.max_include_depth); + if (parser.file_stack.len == parser.max_include_depth) + return error.MaxIncludeDepthReached; + + const top_path = if (parser.file_stack.len > 0) + parser.file_stack[parser.file_stack.len - 1].path + else + ""; + + const abs_include_path = try std.fs.path.resolvePosix( + parser.allocator, + &.{ + std.fs.path.dirnamePosix(top_path) orelse ".", + include_path, + }, + ); + errdefer parser.allocator.free(abs_include_path); + + const file_contents = try parser.io.fetch_file(parser.allocator, abs_include_path); + errdefer parser.allocator.free(file_contents); + + const index = parser.file_stack.len; + parser.file_stack.len += 1; + + parser.file_stack[index] = .{ + .path = abs_include_path, + .tokenizer = .init(file_contents), + .free = true, + }; +} + +pub fn next(parser: *Parser) Error!?[]const u8 { + if (parser.file_stack.len == 0) + return null; + + while (true) { + const top = &parser.file_stack[parser.file_stack.len - 1]; + + const token = if (try fetch_token(&top.tokenizer)) |tok| + tok + else + return null; + + switch (token.type) { + .whitespace, .comment => unreachable, + + .word, .variable, .string => return try parser.resolve_value( + token.type, + top.tokenizer.get_text(token), + ), + + .directive => { + const directive = top.tokenizer.get_text(token); + + if (std.mem.eql(u8, directive, "!include")) { + if (try fetch_token(&top.tokenizer)) |path_token| { + const rel_include_path = switch (path_token.type) { + .word, .variable, .string => try parser.resolve_value( + path_token.type, + top.tokenizer.get_text(path_token), + ), + .comment, .directive, .whitespace => return error.BadDirective, + }; + + try parser.push_file(rel_include_path); + } else { + return error.ExpectedIncludePath; + } + } else { + return error.UnknownDirective; + } + }, + } + } +} + +fn fetch_token(tok: *Tokenizer) Tokenizer.Error!?Token { + while (true) { + const token = if (try tok.next()) |t| + t + else + return null; + + switch (token.type) { + // Skipped: + .whitespace, .comment => {}, + + else => return token, + } + } +} + +fn resolve_value(parser: *Parser, token_type: TokenType, text: []const u8) ![]const u8 { + return switch (token_type) { + .word => text, + + .variable => try parser.io.resolve_variable( + text[1..], + ), + + .string => { + for (text) |c| { + if (c == '\\') + @panic("strings escapes not supported yet!"); + } + return text[1 .. text.len - 1]; + }, + + .comment, .directive, .whitespace => unreachable, + }; +} + +test Parser { + const io: IO = .{ + .fetch_file_fn = undefined, + .resolve_variable_fn = undefined, + }; + + var parser: Parser = try .init(std.testing.allocator, io, .{ + .max_include_depth = 8, + }); + defer parser.deinit(); + + try parser.push_source(.{ + .path = "test.script", + .contents = + \\mbr-part + \\ bootloader PATH1 + \\ part # partition 1 + \\ type fat32-lba + \\ size 500M + \\ bootable + \\ contents + \\ fat32 ... 
+ , + }); + + const sequence: []const []const u8 = &.{ + "mbr-part", + "bootloader", + "PATH1", + "part", + "type", + "fat32-lba", + "size", + "500M", + "bootable", + "contents", + "fat32", + "...", + }; + + for (sequence) |item| { + try std.testing.expectEqualStrings(item, (try parser.next()).?); + } + + try std.testing.expectEqual(null, parser.next()); +} + +test "parser with variables" { + const MyIO = struct { + fn resolve_variable(io: *const IO, name: []const u8) error{UnknownVariable}![]const u8 { + _ = io; + if (std.mem.eql(u8, name, "DISK")) + return "./zig-out/disk.img"; + if (std.mem.eql(u8, name, "KERNEL")) + return "./zig-out/bin/kernel.elf"; + return error.UnknownVariable; + } + }; + const io: IO = .{ + .fetch_file_fn = undefined, + .resolve_variable_fn = MyIO.resolve_variable, + }; + + var parser: Parser = try .init(std.testing.allocator, io, .{ + .max_include_depth = 8, + }); + defer parser.deinit(); + + try parser.push_source(.{ + .path = "test.script", + .contents = + \\select-disk $DISK + \\copy-file $KERNEL /BOOT/vzlinuz + \\ + , + }); + + const sequence: []const []const u8 = &.{ + "select-disk", + "./zig-out/disk.img", + "copy-file", + "./zig-out/bin/kernel.elf", + "/BOOT/vzlinuz", + }; + + for (sequence) |item| { + try std.testing.expectEqualStrings(item, (try parser.next()).?); + } + + try std.testing.expectEqual(null, parser.next()); +} + +test "parser with variables and include files" { + const MyIO = struct { + fn resolve_variable(io: *const IO, name: []const u8) error{UnknownVariable}![]const u8 { + _ = io; + if (std.mem.eql(u8, name, "DISK")) + return "./zig-out/disk.img"; + if (std.mem.eql(u8, name, "KERNEL")) + return "./zig-out/bin/kernel.elf"; + return error.UnknownVariable; + } + fn fetch_file(io: *const IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8 { + _ = io; + if (std.mem.eql(u8, path, "path/parent/kernel.script")) + return try allocator.dupe(u8, "copy-file $KERNEL /BOOT/vzlinuz"); + return error.FileNotFound; + } + }; + const io: IO = .{ + .fetch_file_fn = MyIO.fetch_file, + .resolve_variable_fn = MyIO.resolve_variable, + }; + + var parser: Parser = try .init(std.testing.allocator, io, .{ + .max_include_depth = 8, + }); + defer parser.deinit(); + + try parser.push_source(.{ + .path = "path/to/test.script", + .contents = + \\select-disk $DISK + \\!include "../parent/kernel.script" + \\ + , + }); + + const sequence: []const []const u8 = &.{ + "select-disk", + "./zig-out/disk.img", + "copy-file", + "./zig-out/bin/kernel.elf", + "/BOOT/vzlinuz", + }; + + for (sequence) |item| { + try std.testing.expectEqualStrings(item, (try parser.next()).?); + } + + try std.testing.expectEqual(null, parser.next()); +} + +test "parse nothing" { + const io: IO = .{ + .fetch_file_fn = undefined, + .resolve_variable_fn = undefined, + }; + + var parser: Parser = try .init(std.testing.allocator, io, .{ + .max_include_depth = 8, + }); + defer parser.deinit(); + + try std.testing.expectEqual(null, parser.next()); +} + +fn fuzz_parser(_: void, input: []const u8) !void { + const FuzzIO = struct { + fn fetch_file(io: *const IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8 { + _ = io; + _ = allocator; + _ = path; + return error.FileNotFound; + } + fn resolve_variable(io: *const IO, name: []const u8) error{UnknownVariable}![]const u8 { + _ = io; + return name; + } + }; + + const io: IO = .{ + .fetch_file_fn = FuzzIO.fetch_file, + .resolve_variable_fn = 
FuzzIO.resolve_variable, + }; + + var parser: Parser = try .init(std.testing.allocator, io, .{ + .max_include_depth = 8, + }); + defer parser.deinit(); + + try parser.push_source(.{ + .path = "fuzz.script", + .contents = input, + }); + + while (true) { + const res = parser.next() catch |err| switch (err) { + error.UnknownDirective, + error.UnknownVariable, + error.BadDirective, + error.FileNotFound, + error.ExpectedIncludePath, + => continue, + + error.MaxIncludeDepthReached, + error.IoError, + error.SourceInputTooLarge, + => @panic("reached impossible case for fuzz testing"), + + error.OutOfMemory => |e| return e, + + // Fine, must just terminate the parse loop: + error.InvalidSourceEncoding, + error.BadStringLiteral, + error.BadEscapeSequence, + => return, + }; + if (res == null) + break; + } +} + +test "fuzz parser" { + try std.testing.fuzz({}, fuzz_parser, .{}); +} diff --git a/src/tokenizer.zig b/src/Tokenizer.zig similarity index 61% rename from src/tokenizer.zig rename to src/Tokenizer.zig index 3a02b4f..cdac717 100644 --- a/src/tokenizer.zig +++ b/src/Tokenizer.zig @@ -9,6 +9,9 @@ pub const TokenType = enum { /// `\$\w+` variable, + /// `!\w+` + directive, + /// `\s+` whitespace, @@ -34,21 +37,27 @@ pub fn init(source: []const u8) Tokenizer { pub const Error = error{ SourceInputTooLarge, + InvalidSourceEncoding, + BadEscapeSequence, + BadStringLiteral, }; -pub fn next(tk: *Tokenizer) Error!?Token { - if (tk.index >= tk.source.len) - return null; - if (tk.index >= std.math.maxInt(u32)) { - return error.SourceInputTooLarge; - } +pub fn get_text(tk: Tokenizer, token: Token) []const u8 { + return tk.source[token.offset..][0..token.len]; +} +pub fn next(tk: *Tokenizer) Error!?Token { const start = tk.index; - const first = tk.source[start]; + const first = if (try tk.next_char()) |char| + char + else + return null; if (std.ascii.isWhitespace(first)) { - while (tk.index < tk.source.len and std.ascii.isWhitespace(tk.source[tk.index])) { - tk.index += 1; + while (try tk.peek_char()) |c| { + if (!std.ascii.isWhitespace(c)) + break; + tk.take_char(c); } return .{ .offset = @intCast(start), @@ -56,9 +65,12 @@ pub fn next(tk: *Tokenizer) Error!?Token { .type = .whitespace, }; } + if (first == '#') { - while (tk.index < tk.source.len and tk.source[tk.index] != '\n') { - tk.index += 1; + while (try tk.peek_char()) |c| { + if (c == '\n') + break; + tk.take_char(c); } return .{ .offset = @intCast(start), @@ -70,16 +82,20 @@ pub fn next(tk: *Tokenizer) Error!?Token { if (first == '"') { tk.index += 1; - while (tk.index < tk.source.len) { - const chr = tk.source[tk.index]; - tk.index += 1; - - if (chr == '"') + var string_ok = false; + while (try tk.peek_char()) |c| { + tk.take_char(c); + if (c == '"') { + string_ok = true; break; - - if (chr == '\\') - tk.index += 1; + } + if (c == '\\') { + if ((try tk.next_char()) == null) + return error.BadEscapeSequence; + } } + if (!string_ok) + return error.BadStringLiteral; return .{ .offset = @intCast(start), @@ -92,9 +108,14 @@ pub fn next(tk: *Tokenizer) Error!?Token { if (first == '$') { tk.index += 1; ttype = .variable; - } - while (tk.index < tk.source.len and !std.ascii.isWhitespace(tk.source[tk.index])) { + } else if (first == '!') { tk.index += 1; + ttype = .directive; + } + while (try tk.peek_char()) |c| { + if (std.ascii.isWhitespace(c)) + break; + tk.take_char(c); } return .{ .offset = @intCast(start), @@ -103,10 +124,40 @@ pub fn next(tk: *Tokenizer) Error!?Token { }; } +fn peek_char(tk: Tokenizer) error{ SourceInputTooLarge, 
InvalidSourceEncoding }!?u8 { + if (tk.index >= tk.source.len) + return null; + + if (tk.index >= std.math.maxInt(u32)) + return error.SourceInputTooLarge; + + const char = tk.source[tk.index]; + if (char < 0x20 and !std.ascii.isWhitespace(char)) + return error.InvalidSourceEncoding; + + return char; +} + +fn take_char(tk: *Tokenizer, c: u8) void { + std.debug.assert(tk.source[tk.index] == c); + tk.index += 1; +} + +fn next_char(tk: *Tokenizer) error{ SourceInputTooLarge, InvalidSourceEncoding }!?u8 { + const char = try tk.peek_char(); + if (char) |c| + tk.take_char(c); + return char; +} + fn run_fuzz_test(_: void, input: []const u8) !void { var tokenizer = init(input); - while (try tokenizer.next()) |_| {} + while (true) { + const tok = tokenizer.next() catch return; + if (tok == null) + break; + } } test "fuzz Tokenizer" { @@ -139,6 +190,7 @@ test Tokenizer { std.zig.fmtId(@tagName(actual.type)), std.zig.fmtEscapes(tokenizer.source[actual.offset..][0..actual.len]), }); + try std.testing.expectEqualStrings(expected.@"1", tokenizer.get_text(actual)); try std.testing.expectEqual(offset, actual.offset); try std.testing.expectEqual(expected.@"0", actual.type); try std.testing.expectEqual(expected.@"1".len, actual.len); diff --git a/src/dim.zig b/src/dim.zig index 318cf6f..290b5df 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -3,7 +3,8 @@ //! const std = @import("std"); -const Tokenizer = @import("tokenizer.zig"); +const Tokenizer = @import("Tokenizer.zig"); +const Parser = @import("Parser.zig"); pub fn main() !void { @@ -13,4 +14,5 @@ pub fn main() !void { test { _ = Tokenizer; + _ = Parser; } From bf4674d8fcdd9d4f7252917f0634457a329d3498 Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki <der.teufel.mail@gmail.com> Date: Wed, 5 Mar 2025 13:00:37 +0100 Subject: [PATCH 03/26] Add zig-args as a dependency --- build.zig | 4 ++++ build.zig.zon | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/build.zig b/build.zig index a421305..1a5293f 100644 --- a/build.zig +++ b/build.zig @@ -30,11 +30,15 @@ pub fn build(b: *std.Build) void { // mkfs_fat.linkLibC(); // b.installArtifact(mkfs_fat); + const args_dep = b.dependency("args", .{}); + const args_mod = args_dep.module("args"); + const dim_mod = b.addModule("dim", .{ .root_source_file = b.path("src/dim.zig"), .target = target, .optimize = optimize, }); + dim_mod.addImport("args", args_mod); const dim_exe = b.addExecutable(.{ .name = "dim", diff --git a/build.zig.zon b/build.zig.zon index 890522b..ad2b257 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -7,6 +7,10 @@ .url = "https://github.com/ZigEmbeddedGroup/zfat/archive/3ce06d43a4e04d387034dcae2f486b050701f321.tar.gz", .hash = "12205d874e8c9fd08d93c09ccbfddb045809afcc28e232b36b5abe3d288278ce458f", }, + .args = .{ + .url = "git+https://github.com/ikskuh/zig-args.git#9425b94c103a031777fdd272c555ce93a7dea581", + .hash = "args-0.0.0-CiLiqv_NAAC97fGpk9hS2K681jkiqPsWP6w3ucb_ctGH", + }, }, .paths = .{ "build.zig", From 2abd9bf43d15b5a55812cc145c537466b8451af2 Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki <der.teufel.mail@gmail.com> Date: Wed, 5 Mar 2025 13:01:42 +0100 Subject: [PATCH 04/26] First CLI args usage --- src/dim.zig | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) diff --git a/src/dim.zig b/src/dim.zig index 290b5df..a646418 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -5,11 +5,73 @@ const std = @import("std"); const Tokenizer = @import("Tokenizer.zig"); const Parser = @import("Parser.zig"); 
+const args = @import("args"); + +const Options = struct { + output: ?[]const u8 = null, + size: ?u32 = null, + script: ?[]const u8 = null, + @"import-env": bool = false, +}; + +const usage = + \\dim OPTIONS [VARS] + \\ + \\OPTIONS: + \\ --output <path> + \\ mandatory: where to store the output file + \\[--size <size>] + \\ optional: how big is the resulting disk image? allowed suffixes: k,K,M,G + \\ --script <path> + \\ mandatory: which script file to execute? + \\[--import-env] + \\ optional: if set, imports the current process environment into the variables + \\VARS: + \\{ KEY=VALUE }* + \\ multiple ≥ 0: Sets variable KEY to VALUE + \\ +; pub fn main() !void { + var gpa_impl: std.heap.DebugAllocator(.{}) = .init; + defer _ = gpa_impl.deinit(); + + const gpa = gpa_impl.allocator(); + + const opts = try args.parseForCurrentProcess(Options, gpa, .print); + defer opts.deinit(); + + var var_map: std.StringArrayHashMapUnmanaged([]const u8) = .empty; + defer var_map.deinit(gpa); - // + for (opts.positionals) |pos| { + if (std.mem.indexOfScalar(u8, pos, '=')) |idx| { + const key = pos[0..idx]; + const val = pos[idx + 1 ..]; + try var_map.put(gpa, key, val); + } + } + + const options = opts.options; + + if (options.output == null) { + fatal("No output path specified"); + } + + if (options.script == null) { + fatal("No script specified"); + } + + std.debug.print( + "Output={?s} Script={?s} Size={?} import-env={}\n", + .{ options.output, options.script, options.size, options.@"import-env" }, + ); +} +fn fatal(msg: []const u8) noreturn { + std.debug.print("Error: {s}\n", .{msg}); + std.debug.print("Usage: {s}", .{usage}); + std.process.exit(1); } test { From 6579def2e32afc491a386381a4729af91c5045dd Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki <der.teufel.mail@gmail.com> Date: Wed, 5 Mar 2025 13:13:06 +0100 Subject: [PATCH 05/26] Update CI to use 0.14.0 release --- .github/workflows/validate.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml index ccc1d1d..89194e8 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/validate.yml @@ -16,7 +16,7 @@ jobs: - name: Setup Zig uses: mlugg/setup-zig@v1 with: - version: 0.14.0-dev.3020+c104e8644 + version: 0.14.0 - name: Basic Build run: | From fd05fff7020e0882d0bb68436948622a650bf669 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Quei=C3=9Fner?= <git@random-projects.net> Date: Wed, 5 Mar 2025 16:26:17 +0100 Subject: [PATCH 06/26] Adds way more implementation --- .gitignore | 1 + README.md | 68 ++++- flake.lock | 147 ----------- flake.nix | 49 ---- justfile | 30 +++ src/Parser.zig | 71 +++--- src/components/EmptyData.zig | 11 + src/components/FillData.zig | 14 ++ src/components/RawData.zig | 20 ++ src/components/fs/FatFileSystem.zig | 7 + src/components/part/GptPartitionTable.zig | 7 + src/components/part/MbrPartitionTable.zig | 7 + src/dim.zig | 290 +++++++++++++++++++++- tests/basic/empty.dis | 1 + tests/basic/fill-0x00.dis | 1 + tests/basic/fill-0xAA.dis | 1 + tests/basic/fill-0xFF.dis | 1 + tests/basic/raw.dis | 1 + tests/part/mbr/minimal.dis | 5 + 19 files changed, 493 insertions(+), 239 deletions(-) delete mode 100644 flake.lock delete mode 100644 flake.nix create mode 100644 justfile create mode 100644 src/components/EmptyData.zig create mode 100644 src/components/FillData.zig create mode 100644 src/components/RawData.zig create mode 100644 src/components/fs/FatFileSystem.zig create mode 100644 src/components/part/GptPartitionTable.zig create mode 
100644 src/components/part/MbrPartitionTable.zig create mode 100644 tests/basic/empty.dis create mode 100644 tests/basic/fill-0x00.dis create mode 100644 tests/basic/fill-0xAA.dis create mode 100644 tests/basic/fill-0xFF.dis create mode 100644 tests/basic/raw.dis create mode 100644 tests/part/mbr/minimal.dis diff --git a/.gitignore b/.gitignore index 3389c86..d909469 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ .zig-cache/ zig-out/ +.vscode/ \ No newline at end of file diff --git a/README.md b/README.md index 55314a9..40d3c37 100644 --- a/README.md +++ b/README.md @@ -1 +1,67 @@ -# disk-image-step +# Disk Image Creator + +The Disk Image Creator is a tool that uses a simple textual description of a disk image to create actual images. + +This tool is incredibly valuable when implementing your own operating system or deployments. + +## Example + +```plain + +``` + +## Available Content Types + +```plain + +``` + +### Empty Content (`empty`) + +This type of content does not change its range at all and keeps it empty. No bytes will be emitted. + +```plain +empty +``` + +### Fill (`fill`) + +The *Fill* type will fill the remaining size in its space with the given `<byte>` value. + +```plain +fill <byte> +``` + +### Raw Binary Content (`raw`) + +The *Raw* type will include the file at `<path>` verbatim and will error, if not enough space is available. + +`<path>` is relative to the current file. + +```plain +raw <path> +``` + +### MBR Partition Table (`mbr-part`) + +```plain + +``` + +### GPT Partition Table (`gpt-part`) + +```plain + +``` + +### FAT File System (`fat`) + +```plain + +``` + +## Compiling + +- Install [Zig 0.14.0](https://ziglang.org/download/). +- Invoke `zig build -Drelease` in the repository root. +- Execute `./zig-out/bin/dim --help` to verify your compilation worked. 
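The `## Example` and per-content-type blocks in the README above are still left empty at this point in the series. For orientation, a minimal script sketch is shown below; it is assembled only from directives that already appear in `concept/script.dis` and the `tests/` scripts of this series (`mbr-part`, `ignore`, `part`, `type fat32-lba`, `size`, `bootable`, `contents`, `fill`), and the exact grammar accepted for `mbr-part` is only defined by the later patches, so treat the nesting as illustrative rather than authoritative. The `fat` content type still panics with "not implemented yet" at this stage, which is why the partition is filled with a constant byte instead. Per the justfile added in this patch, such a script would be run roughly as `./zig-out/bin/dim --output disk.img --size 30M --script example.dis`.

```plain
# illustrative sketch only; file name example.dis is hypothetical
mbr-part
  part              # partition 1
    type fat32-lba
    size 500M
    bootable
    contents
      fill 0xAA     # placeholder contents; the fat content type is not implemented yet here
  ignore            # partition 2
  ignore            # partition 3
  ignore            # partition 4
```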
diff --git a/flake.lock b/flake.lock deleted file mode 100644 index 1ba4434..0000000 --- a/flake.lock +++ /dev/null @@ -1,147 +0,0 @@ -{ - "nodes": { - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_2": { - "inputs": { - "systems": "systems_2" - }, - "locked": { - "lastModified": 1705309234, - "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1718229064, - "narHash": "sha256-ZFav8A9zPNfjZg/wrxh1uZeMJHELRfRgFP+meq01XYk=", - "owner": "nixos", - "repo": "nixpkgs", - "rev": "5c2ec3a5c2ee9909904f860dadc19bc12cd9cc44", - "type": "github" - }, - "original": { - "owner": "nixos", - "ref": "nixos-23.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1708161998, - "narHash": "sha256-6KnemmUorCvlcAvGziFosAVkrlWZGIc6UNT9GUYr0jQ=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "84d981bae8b5e783b3b548de505b22880559515f", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "root": { - "inputs": { - "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs", - "zig": "zig" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "zig": { - "inputs": { - "flake-compat": "flake-compat", - "flake-utils": "flake-utils_2", - "nixpkgs": "nixpkgs_2" - }, - "locked": { - "lastModified": 1718324667, - "narHash": "sha256-AZGskEGjvUmeb+fgBv4lxtCUtXmYBI+ABOlV+og9X14=", - "owner": "mitchellh", - "repo": "zig-overlay", - "rev": "b2c14e5f842af6b2bf03e634f73fd84f6956d4ba", - "type": "github" - }, - "original": { - "owner": "mitchellh", - "repo": "zig-overlay", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/flake.nix b/flake.nix deleted file mode 100644 index 9f19d8d..0000000 --- a/flake.nix +++ /dev/null @@ -1,49 +0,0 @@ -{ - description = "A build step for Zig that construct arbitrary disk images"; - - inputs = { - nixpkgs.url = 
"github:nixos/nixpkgs/nixos-23.11"; - flake-utils.url = "github:numtide/flake-utils"; - zig.url = "github:mitchellh/zig-overlay"; - }; - - outputs = { - self, - nixpkgs, - flake-utils, - ... - } @ inputs: let - overlays = [ - # Other overlays - (final: prev: { - zigpkgs = inputs.zig.packages.${prev.system}; - }) - ]; - - # Our supported systems are the same supported systems as the Zig binaries - systems = builtins.attrNames inputs.zig.packages; - in - flake-utils.lib.eachSystem systems ( - system: let - pkgs = import nixpkgs {inherit overlays system;}; - in let - zig = pkgs.zigpkgs."0.13.0"; - in rec { - packages.default = pkgs.stdenv.mkDerivation { - name = "zig-disk-image-step"; - src = ./.; - nativeBuildInputs = [zig]; - - configurePhase = ""; - - buildPhase = '' - zig build - ''; - - installPhase = '' - mv zig-out $out - ''; - }; - } - ); -} diff --git a/justfile b/justfile new file mode 100644 index 0000000..e7dc9e9 --- /dev/null +++ b/justfile @@ -0,0 +1,30 @@ + +zig:="zig-0.14.0" + +default: install test + +install: + {{zig}} build install + +test: unit-test behaviour-tests + +unit-test: + {{zig}} build test + +behaviour-tests: \ + (behaviour-test "tests/basic/empty.dis") \ + (behaviour-test "tests/basic/fill-0x00.dis") \ + (behaviour-test "tests/basic/fill-0xAA.dis") \ + (behaviour-test "tests/basic/fill-0xFF.dis") \ + (behaviour-test "tests/basic/raw.dis") \ + (behaviour-test "tests/part/mbr/minimal.dis") + +behaviour-test script: + {{zig}} build install + + ./zig-out/bin/dim --output .zig-cache/disk.img --script "{{script}}" + ./zig-out/bin/dim --output .zig-cache/disk.img --size 30M --script "{{script}}" + + +fuzz: + {{zig}} build install test --fuzz --port 35991 diff --git a/src/Parser.zig b/src/Parser.zig index 21babe6..75c841d 100644 --- a/src/Parser.zig +++ b/src/Parser.zig @@ -38,7 +38,7 @@ const File = struct { }; allocator: std.mem.Allocator, -io: IO, +io: *const IO, file_stack: []File, max_include_depth: usize, @@ -46,7 +46,7 @@ max_include_depth: usize, pub const InitOptions = struct { max_include_depth: usize, }; -pub fn init(allocator: std.mem.Allocator, io: IO, options: InitOptions) error{OutOfMemory}!Parser { +pub fn init(allocator: std.mem.Allocator, io: *const IO, options: InitOptions) error{OutOfMemory}!Parser { var slice = try allocator.alloc(File, options.max_include_depth); slice.len = 0; return .{ @@ -88,6 +88,22 @@ pub fn push_source(parser: *Parser, options: struct { } pub fn push_file(parser: *Parser, include_path: []const u8) !void { + const abs_include_path = try parser.get_include_path(parser.allocator, include_path); + + const file_contents = try parser.io.fetch_file(parser.allocator, abs_include_path); + errdefer parser.allocator.free(file_contents); + + const index = parser.file_stack.len; + parser.file_stack.len += 1; + + parser.file_stack[index] = .{ + .path = abs_include_path, + .tokenizer = .init(file_contents), + .free = true, + }; +} + +pub fn get_include_path(parser: Parser, allocator: std.mem.Allocator, rel_include_path: []const u8) ![]const u8 { std.debug.assert(parser.file_stack.len <= parser.max_include_depth); if (parser.file_stack.len == parser.max_include_depth) return error.MaxIncludeDepthReached; @@ -98,28 +114,25 @@ pub fn push_file(parser: *Parser, include_path: []const u8) !void { ""; const abs_include_path = try std.fs.path.resolvePosix( - parser.allocator, + allocator, &.{ std.fs.path.dirnamePosix(top_path) orelse ".", - include_path, + rel_include_path, }, ); - errdefer parser.allocator.free(abs_include_path); - - const 
file_contents = try parser.io.fetch_file(parser.allocator, abs_include_path); - errdefer parser.allocator.free(file_contents); + errdefer allocator.free(abs_include_path); - const index = parser.file_stack.len; - parser.file_stack.len += 1; + return abs_include_path; +} - parser.file_stack[index] = .{ - .path = abs_include_path, - .tokenizer = .init(file_contents), - .free = true, - }; +pub fn next(parser: *Parser) (Error || error{UnexpectedEndOfFile})![]const u8 { + return if (try parser.next_or_eof()) |word| + word + else + error.UnexpectedEndOfFile; } -pub fn next(parser: *Parser) Error!?[]const u8 { +pub fn next_or_eof(parser: *Parser) Error!?[]const u8 { if (parser.file_stack.len == 0) return null; @@ -206,7 +219,7 @@ test Parser { .resolve_variable_fn = undefined, }; - var parser: Parser = try .init(std.testing.allocator, io, .{ + var parser: Parser = try .init(std.testing.allocator, &io, .{ .max_include_depth = 8, }); defer parser.deinit(); @@ -241,10 +254,10 @@ test Parser { }; for (sequence) |item| { - try std.testing.expectEqualStrings(item, (try parser.next()).?); + try std.testing.expectEqualStrings(item, (try parser.next_or_eof()).?); } - try std.testing.expectEqual(null, parser.next()); + try std.testing.expectEqual(null, parser.next_or_eof()); } test "parser with variables" { @@ -263,7 +276,7 @@ test "parser with variables" { .resolve_variable_fn = MyIO.resolve_variable, }; - var parser: Parser = try .init(std.testing.allocator, io, .{ + var parser: Parser = try .init(std.testing.allocator, &io, .{ .max_include_depth = 8, }); defer parser.deinit(); @@ -286,10 +299,10 @@ test "parser with variables" { }; for (sequence) |item| { - try std.testing.expectEqualStrings(item, (try parser.next()).?); + try std.testing.expectEqualStrings(item, (try parser.next_or_eof()).?); } - try std.testing.expectEqual(null, parser.next()); + try std.testing.expectEqual(null, parser.next_or_eof()); } test "parser with variables and include files" { @@ -314,7 +327,7 @@ test "parser with variables and include files" { .resolve_variable_fn = MyIO.resolve_variable, }; - var parser: Parser = try .init(std.testing.allocator, io, .{ + var parser: Parser = try .init(std.testing.allocator, &io, .{ .max_include_depth = 8, }); defer parser.deinit(); @@ -337,10 +350,10 @@ test "parser with variables and include files" { }; for (sequence) |item| { - try std.testing.expectEqualStrings(item, (try parser.next()).?); + try std.testing.expectEqualStrings(item, (try parser.next_or_eof()).?); } - try std.testing.expectEqual(null, parser.next()); + try std.testing.expectEqual(null, parser.next_or_eof()); } test "parse nothing" { @@ -349,12 +362,12 @@ test "parse nothing" { .resolve_variable_fn = undefined, }; - var parser: Parser = try .init(std.testing.allocator, io, .{ + var parser: Parser = try .init(std.testing.allocator, &io, .{ .max_include_depth = 8, }); defer parser.deinit(); - try std.testing.expectEqual(null, parser.next()); + try std.testing.expectEqual(null, parser.next_or_eof()); } fn fuzz_parser(_: void, input: []const u8) !void { @@ -376,7 +389,7 @@ fn fuzz_parser(_: void, input: []const u8) !void { .resolve_variable_fn = FuzzIO.resolve_variable, }; - var parser: Parser = try .init(std.testing.allocator, io, .{ + var parser: Parser = try .init(std.testing.allocator, &io, .{ .max_include_depth = 8, }); defer parser.deinit(); @@ -387,7 +400,7 @@ fn fuzz_parser(_: void, input: []const u8) !void { }); while (true) { - const res = parser.next() catch |err| switch (err) { + const res = parser.next_or_eof() 
catch |err| switch (err) { error.UnknownDirective, error.UnknownVariable, error.BadDirective, diff --git a/src/components/EmptyData.zig b/src/components/EmptyData.zig new file mode 100644 index 0000000..ccc7c06 --- /dev/null +++ b/src/components/EmptyData.zig @@ -0,0 +1,11 @@ +//! +//! The `empty` content will just not touch anything in the output +//! and serves as a placeholder. +//! + +const std = @import("std"); +const dim = @import("../dim.zig"); + +pub fn execute(ctx: dim.Context) !void { + _ = ctx; +} diff --git a/src/components/FillData.zig b/src/components/FillData.zig new file mode 100644 index 0000000..69811e8 --- /dev/null +++ b/src/components/FillData.zig @@ -0,0 +1,14 @@ +//! +//! The `fill <byte>` content will fill the remaining space with the given `<byte>` value. +//! + +const std = @import("std"); +const dim = @import("../dim.zig"); + +pub fn execute(ctx: dim.Context) !void { + const fill_value: u8 = try ctx.get_integer(u8, 0); + + if (ctx.get_remaining_size()) |size| { + try ctx.writer().writeByteNTimes(fill_value, size); + } +} diff --git a/src/components/RawData.zig b/src/components/RawData.zig new file mode 100644 index 0000000..88ad715 --- /dev/null +++ b/src/components/RawData.zig @@ -0,0 +1,20 @@ +const std = @import("std"); +const dim = @import("../dim.zig"); + +pub fn execute(ctx: dim.Context) !void { + const path = try ctx.get_string(); + + var file = try ctx.open_file(path); + defer file.close(); + + if (ctx.get_remaining_size()) |available_size| { + const stat = try file.stat(); + + if (available_size < stat.size) + return error.InsufficientSize; // TODO: Error reporting + } + + var fifo: std.fifo.LinearFifo(u8, .{ .Static = 8192 }) = .init(); + + try fifo.pump(file.reader(), ctx.writer()); +} diff --git a/src/components/fs/FatFileSystem.zig b/src/components/fs/FatFileSystem.zig new file mode 100644 index 0000000..24350e2 --- /dev/null +++ b/src/components/fs/FatFileSystem.zig @@ -0,0 +1,7 @@ +const std = @import("std"); +const dim = @import("../../dim.zig"); + +pub fn execute(ctx: dim.Context) !void { + _ = ctx; + @panic("fat not implemented yet!"); +} diff --git a/src/components/part/GptPartitionTable.zig b/src/components/part/GptPartitionTable.zig new file mode 100644 index 0000000..85f1cc8 --- /dev/null +++ b/src/components/part/GptPartitionTable.zig @@ -0,0 +1,7 @@ +const std = @import("std"); +const dim = @import("../../dim.zig"); + +pub fn execute(ctx: dim.Context) !void { + _ = ctx; + @panic("gpt-part not implemented yet!"); +} diff --git a/src/components/part/MbrPartitionTable.zig b/src/components/part/MbrPartitionTable.zig new file mode 100644 index 0000000..0b1c691 --- /dev/null +++ b/src/components/part/MbrPartitionTable.zig @@ -0,0 +1,7 @@ +const std = @import("std"); +const dim = @import("../../dim.zig"); + +pub fn execute(ctx: dim.Context) !void { + _ = ctx; + @panic("mbr-part not implemented yet!"); +} diff --git a/src/dim.zig b/src/dim.zig index a646418..0a5fb75 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -7,9 +7,11 @@ const Tokenizer = @import("Tokenizer.zig"); const Parser = @import("Parser.zig"); const args = @import("args"); +const max_script_size = 10 * DiskSize.MiB; + const Options = struct { output: ?[]const u8 = null, - size: ?u32 = null, + size: ?DiskSize = null, script: ?[]const u8 = null, @"import-env": bool = false, }; @@ -32,7 +34,9 @@ const usage = \\ ; -pub fn main() !void { +const VariableMap = std.StringArrayHashMapUnmanaged([]const u8); + +pub fn main() !u8 { var gpa_impl: std.heap.DebugAllocator(.{}) = .init; defer _ = 
gpa_impl.deinit(); @@ -41,31 +45,77 @@ pub fn main() !void { const opts = try args.parseForCurrentProcess(Options, gpa, .print); defer opts.deinit(); - var var_map: std.StringArrayHashMapUnmanaged([]const u8) = .empty; + const options = opts.options; + + const output_path = options.output orelse fatal("No output path specified"); + const script_path = options.script orelse fatal("No script specified"); + + var var_map: VariableMap = .empty; defer var_map.deinit(gpa); + var env_map = try std.process.getEnvMap(gpa); + defer env_map.deinit(); + + if (options.@"import-env") { + var iter = env_map.iterator(); + while (iter.next()) |entry| { + try var_map.putNoClobber(gpa, entry.key_ptr.*, entry.value_ptr.*); + } + } + + var bad_args = false; for (opts.positionals) |pos| { if (std.mem.indexOfScalar(u8, pos, '=')) |idx| { const key = pos[0..idx]; const val = pos[idx + 1 ..]; try var_map.put(gpa, key, val); + } else { + std.debug.print("unexpected argument positional '{}'\n", .{ + std.zig.fmtEscapes(pos), + }); + bad_args = true; } } + if (bad_args) + return 1; - const options = opts.options; + var current_dir = try std.fs.cwd().openDir(".", .{}); + defer current_dir.close(); - if (options.output == null) { - fatal("No output path specified"); - } + const script_source = try current_dir.readFileAlloc(gpa, script_path, max_script_size); + defer gpa.free(script_source); - if (options.script == null) { - fatal("No script specified"); - } + var output_file = try current_dir.atomicFile(output_path, .{}); + defer output_file.deinit(); + + var env = Environment{ + .allocator = gpa, + .vars = &var_map, + .include_base = current_dir, + .parser = undefined, + }; - std.debug.print( - "Output={?s} Script={?s} Size={?} import-env={}\n", - .{ options.output, options.script, options.size, options.@"import-env" }, + var parser = try Parser.init( + gpa, + &env.io, + .{ + .max_include_depth = 8, + }, ); + defer parser.deinit(); + + env.parser = &parser; + + try parser.push_source(.{ + .path = script_path, + .contents = script_source, + }); + + try env.execute_content(&parser); + + try output_file.finish(); + + return 0; } fn fatal(msg: []const u8) noreturn { @@ -74,7 +124,221 @@ fn fatal(msg: []const u8) noreturn { std.process.exit(1); } +const content_types: []const struct { []const u8, type } = &.{ + .{ "mbr-part", @import("components/part/MbrPartitionTable.zig") }, + .{ "gpt-part", @import("components/part/GptPartitionTable.zig") }, + .{ "fat", @import("components/fs/FatFileSystem.zig") }, + .{ "raw", @import("components/RawData.zig") }, + .{ "empty", @import("components/EmptyData.zig") }, + .{ "fill", @import("components/FillData.zig") }, +}; + +pub const Context = struct { + env: *Environment, + + pub const WriteError = error{}; + pub const Writer = std.io.Writer(*const Context, WriteError, write_some_data); + + pub fn get_remaining_size(ctx: Context) ?u64 { + _ = ctx; + + // TODO: This + return null; + } + + pub fn open_file(ctx: Context, path: []const u8) !std.fs.File { + const abs_path = try ctx.env.parser.get_include_path(ctx.env.allocator, path); + defer ctx.env.allocator.free(abs_path); + + return ctx.env.include_base.openFile(abs_path, .{}); + } + + pub fn writer(ctx: *const Context) Writer { + return .{ .context = ctx }; + } + + pub fn get_string(ctx: Context) ![]const u8 { + return ctx.env.parser.next(); + } + + pub fn get_enum(ctx: Context, comptime E: type) !E { + if (@typeInfo(E) != .@"enum") + @compileError("get_enum requires an enum type!"); + return std.meta.stringToEnum( + E, + 
ctx.get_string(), + ) orelse return error.InvalidEnumTag; + } + + pub fn get_integer(ctx: Context, comptime I: type, base: u8) !I { + if (@typeInfo(I) != .int) + @compileError("get_integer requires an integer type!"); + return try std.fmt.parseInt( + I, + try ctx.get_string(), + base, + ); + } + + fn write_some_data(ctx: *const Context, buffer: []const u8) WriteError!usize { + _ = ctx; + // TODO: Implement this! + return buffer.len; + } +}; + +const Environment = struct { + allocator: std.mem.Allocator, + parser: *Parser, + include_base: std.fs.Dir, + vars: *const VariableMap, + + io: Parser.IO = .{ + .fetch_file_fn = fetch_file, + .resolve_variable_fn = resolve_var, + }, + + fn execute_content(env: *Environment, parser: *Parser) !void { + const content_type_str = try parser.next(); + + inline for (content_types) |tn| { + const name, const impl = tn; + + if (std.mem.eql(u8, name, content_type_str)) { + return impl.execute(Context{ .env = env }); + } + } + return error.UnknownContentType; + } + + fn fetch_file(io: *const Parser.IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8 { + const env: *const Environment = @fieldParentPtr("io", io); + return env.include_base.readFileAlloc(allocator, path, max_script_size) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.FileNotFound => return error.FileNotFound, + else => return error.IoError, + }; + } + + fn resolve_var(io: *const Parser.IO, name: []const u8) error{UnknownVariable}![]const u8 { + const env: *const Environment = @fieldParentPtr("io", io); + return env.vars.get(name) orelse return error.UnknownVariable; + } +}; + test { _ = Tokenizer; _ = Parser; } + +const DiskSize = enum(u64) { + const KiB = 1024; + const MiB = 1024 * 1024; + const GiB = 1024 * 1024 * 1024; + + _, + + pub fn parse(str: []const u8) error{ InvalidSize, Overflow }!DiskSize { + const suffix_scaling: ?u64 = if (std.mem.endsWith(u8, str, "K")) + KiB + else if (std.mem.endsWith(u8, str, "M")) + MiB + else if (std.mem.endsWith(u8, str, "G")) + GiB + else + null; + + const cutoff: usize = if (suffix_scaling != null) 1 else 0; + + const numeric_text = std.mem.trim(u8, str[0 .. str.len - cutoff], " \t\r\n"); + + const raw_number = std.fmt.parseInt(u64, numeric_text, 0) catch |err| switch (err) { + error.Overflow => return error.Overflow, + error.InvalidCharacter => return error.InvalidSize, + }; + + const byte_size = if (suffix_scaling) |scale| + try std.math.mul(u64, raw_number, scale) + else + raw_number; + + return @enumFromInt(byte_size); + } + + pub fn size_in_bytes(ds: DiskSize) u64 { + return @intFromEnum(ds); + } + + pub fn format(ds: DiskSize, fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void { + _ = fmt; + _ = opt; + + const size = ds.size_in_bytes(); + + const div: u64, const unit: []const u8 = if (size > GiB) + .{ GiB, " GiBi" } + else if (size > MiB) + .{ MiB, " MeBi" } + else if (size > KiB) + .{ KiB, " KiBi" } + else + .{ 1, " B" }; + + if (size == 0) { + try writer.writeAll("0 B"); + return; + } + + const scaled_value = (1000 * size) / div; + + var buf: [std.math.log2_int_ceil(u64, std.math.maxInt(u64))]u8 = undefined; + const divided = try std.fmt.bufPrint(&buf, "{d}", .{scaled_value}); + + std.debug.assert(divided.len >= 3); + + const prefix, const suffix = .{ + divided[0 .. 
divided.len - 3], + std.mem.trimRight(u8, divided[divided.len - 3 ..], "0"), + }; + + if (suffix.len > 0) { + try writer.print("{s}.{s}{s}", .{ prefix, suffix, unit }); + } else { + try writer.print("{s}{s}", .{ prefix, unit }); + } + } +}; + +test DiskSize { + const KiB = 1024; + const MiB = 1024 * 1024; + const GiB = 1024 * 1024 * 1024; + + const patterns: []const struct { u64, []const u8 } = &.{ + .{ 0, "0" }, + .{ 1000, "1000" }, + .{ 4096, "0x1000" }, + .{ 4096 * MiB, "0x1000 M" }, + .{ 1 * KiB, "1K" }, + .{ 1 * KiB, "1K" }, + .{ 1 * KiB, "1 K" }, + .{ 150 * KiB, "150K" }, + + .{ 1 * MiB, "1M" }, + .{ 1 * MiB, "1M" }, + .{ 1 * MiB, "1 M" }, + .{ 150 * MiB, "150M" }, + + .{ 1 * GiB, "1G" }, + .{ 1 * GiB, "1G" }, + .{ 1 * GiB, "1 G" }, + .{ 150 * GiB, "150G" }, + }; + + for (patterns) |pat| { + const size_in_bytes, const stringified = pat; + const actual_size = try DiskSize.parse(stringified); + + try std.testing.expectEqual(size_in_bytes, actual_size.size_in_bytes()); + } +} diff --git a/tests/basic/empty.dis b/tests/basic/empty.dis new file mode 100644 index 0000000..c6cac69 --- /dev/null +++ b/tests/basic/empty.dis @@ -0,0 +1 @@ +empty diff --git a/tests/basic/fill-0x00.dis b/tests/basic/fill-0x00.dis new file mode 100644 index 0000000..e2f84a5 --- /dev/null +++ b/tests/basic/fill-0x00.dis @@ -0,0 +1 @@ +fill 0x00 diff --git a/tests/basic/fill-0xAA.dis b/tests/basic/fill-0xAA.dis new file mode 100644 index 0000000..d07e9d1 --- /dev/null +++ b/tests/basic/fill-0xAA.dis @@ -0,0 +1 @@ +fill 0xAA diff --git a/tests/basic/fill-0xFF.dis b/tests/basic/fill-0xFF.dis new file mode 100644 index 0000000..00586bd --- /dev/null +++ b/tests/basic/fill-0xFF.dis @@ -0,0 +1 @@ +fill 0xFF diff --git a/tests/basic/raw.dis b/tests/basic/raw.dis new file mode 100644 index 0000000..094ac08 --- /dev/null +++ b/tests/basic/raw.dis @@ -0,0 +1 @@ +raw ./raw.dis diff --git a/tests/part/mbr/minimal.dis b/tests/part/mbr/minimal.dis new file mode 100644 index 0000000..273948a --- /dev/null +++ b/tests/part/mbr/minimal.dis @@ -0,0 +1,5 @@ +mbr-part + ignore # partition 1 + ignore # partition 2 + ignore # partition 3 + ignore # partition 4 From e52c6edd51b96c57ae51c27b3dd2017442d4ab74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Sun, 9 Mar 2025 14:01:55 +0100 Subject: [PATCH 07/26] starts to refactor into tree of dynamic content --- README.md | 4 +- build.zig.zon | 2 +- concept/script.dis | 2 +- justfile | 6 +- src/build.old.zig | 204 ----------- src/components/EmptyData.zig | 20 +- src/components/FillData.zig | 28 +- src/components/PasteFile.zig | 27 ++ src/components/RawData.zig | 20 -- src/components/part/GptPartitionTable.zig | 61 ++++ src/components/part/MbrPartitionTable.zig | 347 ++++++++++++++++++- src/dim.zig | 403 +++++++++++++++++++--- tests/basic/raw.dis | 2 +- 13 files changed, 841 insertions(+), 285 deletions(-) create mode 100644 src/components/PasteFile.zig delete mode 100644 src/components/RawData.zig diff --git a/README.md b/README.md index 40d3c37..8fbfcdc 100644 --- a/README.md +++ b/README.md @@ -32,14 +32,14 @@ The *Fill* type will fill the remaining size in its space with the given `<byte> fill <byte> ``` -### Raw Binary Content (`raw`) +### Paste File Contents (`paste-file`) The *Raw* type will include the file at `<path>` verbatim and will error, if not enough space is available. `<path>` is relative to the current file. 
```plain -raw <path> +paste-file <path> ``` ### MBR Partition Table (`mbr-part`) diff --git a/build.zig.zon b/build.zig.zon index ad2b257..bb78edd 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -5,7 +5,7 @@ .dependencies = .{ .zfat = .{ .url = "https://github.com/ZigEmbeddedGroup/zfat/archive/3ce06d43a4e04d387034dcae2f486b050701f321.tar.gz", - .hash = "12205d874e8c9fd08d93c09ccbfddb045809afcc28e232b36b5abe3d288278ce458f", + .hash = "zfat-0.0.0-AAAAAMYlcABdh06Mn9CNk8Ccy_3bBFgJr8wo4jKza1q-", }, .args = .{ .url = "git+https://github.com/ikskuh/zig-args.git#9425b94c103a031777fdd272c555ce93a7dea581", diff --git a/concept/script.dis b/concept/script.dis index a847fca..b7ac790 100644 --- a/concept/script.dis +++ b/concept/script.dis @@ -1,5 +1,5 @@ mbr-part - bootloader $PATH1 + bootloader paste-file $PATH1 part # partition 1 type fat32-lba size 500M diff --git a/justfile b/justfile index e7dc9e9..55d2044 100644 --- a/justfile +++ b/justfile @@ -19,11 +19,9 @@ behaviour-tests: \ (behaviour-test "tests/basic/raw.dis") \ (behaviour-test "tests/part/mbr/minimal.dis") -behaviour-test script: - {{zig}} build install - +behaviour-test script: install ./zig-out/bin/dim --output .zig-cache/disk.img --script "{{script}}" - ./zig-out/bin/dim --output .zig-cache/disk.img --size 30M --script "{{script}}" + ./zig-out/bin/dim --output .zig-cache/disk.img --script "{{script}}" --size 30M fuzz: diff --git a/src/build.old.zig b/src/build.old.zig index bbc14d4..48d653f 100644 --- a/src/build.old.zig +++ b/src/build.old.zig @@ -712,210 +712,6 @@ pub const Content = union(enum) { } }; -pub const mbr = struct { - pub const Table = struct { - bootloader: [440]u8 = .{0} ** 440, - disk_id: ?u32 = null, - partitions: [4]?*const Partition, - }; - - pub const Partition = struct { - offset: ?u64 = null, - size: u64, - - bootable: bool, - type: PartitionType, - - data: Content, - }; - - /// https://en.wikipedia.org/wiki/Partition_type - pub const PartitionType = enum(u8) { - empty = 0x00, - - fat12 = 0x01, - ntfs = 0x07, - - fat32_chs = 0x0B, - fat32_lba = 0x0C, - - fat16_lba = 0x0E, - - linux_swap = 0x82, - linux_fs = 0x83, - linux_lvm = 0x8E, - - // Output from fdisk (util-linux 2.38.1) - // 00 Leer 27 Verst. NTFS Win 82 Linux Swap / So c1 DRDOS/sec (FAT- - // 01 FAT12 39 Plan 9 83 Linux c4 DRDOS/sec (FAT- - // 02 XENIX root 3c PartitionMagic 84 versteckte OS/2 c6 DRDOS/sec (FAT- - // 03 XENIX usr 40 Venix 80286 85 Linux erweitert c7 Syrinx - // 04 FAT16 <32M 41 PPC PReP Boot 86 NTFS Datenträge da Keine Dateisyst - // 05 Erweiterte 42 SFS 87 NTFS Datenträge db CP/M / CTOS / . - // 06 FAT16 4d QNX4.x 88 Linux Klartext de Dell Dienstprog - // 07 HPFS/NTFS/exFAT 4e QNX4.x 2. Teil 8e Linux LVM df BootIt - // 08 AIX 4f QNX4.x 3. Teil 93 Amoeba e1 DOS-Zugriff - // 09 AIX bootfähig 50 OnTrack DM 94 Amoeba BBT e3 DOS R/O - // 0a OS/2-Bootmanage 51 OnTrack DM6 Aux 9f BSD/OS e4 SpeedStor - // 0b W95 FAT32 52 CP/M a0 IBM Thinkpad Ru ea Linux erweitert - // 0c W95 FAT32 (LBA) 53 OnTrack DM6 Aux a5 FreeBSD eb BeOS Dateisyste - // 0e W95 FAT16 (LBA) 54 OnTrackDM6 a6 OpenBSD ee GPT - // 0f W95 Erw. (LBA) 55 EZ-Drive a7 NeXTSTEP ef EFI (FAT-12/16/ - // 10 OPUS 56 Golden Bow a8 Darwin UFS f0 Linux/PA-RISC B - // 11 Verst. FAT12 5c Priam Edisk a9 NetBSD f1 SpeedStor - // 12 Compaq Diagnost 61 SpeedStor ab Darwin Boot f4 SpeedStor - // 14 Verst. FAT16 <3 63 GNU HURD oder S af HFS / HFS+ f2 DOS sekundär - // 16 Verst. FAT16 64 Novell Netware b7 BSDi Dateisyste f8 EBBR geschützt - // 17 Verst. 
HPFS/NTF 65 Novell Netware b8 BSDI Swap fb VMware VMFS - // 18 AST SmartSleep 70 DiskSecure Mult bb Boot-Assistent fc VMware VMKCORE - // 1b Verst. W95 FAT3 75 PC/IX bc Acronis FAT32 L fd Linux RAID-Auto - // 1c Verst. W95 FAT3 80 Altes Minix be Solaris Boot fe LANstep - // 1e Verst. W95 FAT1 81 Minix / altes L bf Solaris ff BBT - // 24 NEC DOS - - _, - }; - - pub fn encodeMbrChsEntry(lba: u32) [3]u8 { - var chs = lbaToChs(lba); - - if (chs.cylinder >= 1024) { - chs = .{ - .cylinder = 1023, - .head = 255, - .sector = 63, - }; - } - - const cyl: u10 = @intCast(chs.cylinder); - const head: u8 = @intCast(chs.head); - const sect: u6 = @intCast(chs.sector); - - const sect_cyl: u8 = @as(u8, 0xC0) & @as(u8, @truncate(cyl >> 2)) + sect; - const sect_8: u8 = @truncate(cyl); - - return .{ head, sect_cyl, sect_8 }; - } - - const CHS = struct { - cylinder: u32, - head: u8, // limit: 256 - sector: u6, // limit: 64 - - pub fn init(c: u32, h: u8, s: u6) CHS { - return .{ .cylinder = c, .head = h, .sector = s }; - } - }; - - pub fn lbaToChs(lba: u32) CHS { - const hpc = 255; - const spt = 63; - - // C, H and S are the cylinder number, the head number, and the sector number - // LBA is the logical block address - // HPC is the maximum number of heads per cylinder (reported by disk drive, typically 16 for 28-bit LBA) - // SPT is the maximum number of sectors per track (reported by disk drive, typically 63 for 28-bit LBA) - // LBA = (C * HPC + H) * SPT + (S - 1) - - const sector = (lba % spt); - const cyl_head = (lba / spt); - - const head = (cyl_head % hpc); - const cyl = (cyl_head / hpc); - - return CHS{ - .sector = @intCast(sector + 1), - .head = @intCast(head), - .cylinder = cyl, - }; - } -}; - -// test "lba to chs" { -// // table from https://en.wikipedia.org/wiki/Logical_block_addressing#CHS_conversion -// try std.testing.expectEqual(mbr.CHS.init(0, 0, 1), mbr.lbaToChs(0)); -// try std.testing.expectEqual(mbr.CHS.init(0, 0, 2), mbr.lbaToChs(1)); -// try std.testing.expectEqual(mbr.CHS.init(0, 0, 3), mbr.lbaToChs(2)); -// try std.testing.expectEqual(mbr.CHS.init(0, 0, 63), mbr.lbaToChs(62)); -// try std.testing.expectEqual(mbr.CHS.init(0, 1, 1), mbr.lbaToChs(63)); -// try std.testing.expectEqual(mbr.CHS.init(0, 15, 1), mbr.lbaToChs(945)); -// try std.testing.expectEqual(mbr.CHS.init(0, 15, 63), mbr.lbaToChs(1007)); -// try std.testing.expectEqual(mbr.CHS.init(1, 0, 1), mbr.lbaToChs(1008)); -// try std.testing.expectEqual(mbr.CHS.init(1, 0, 63), mbr.lbaToChs(1070)); -// try std.testing.expectEqual(mbr.CHS.init(1, 1, 1), mbr.lbaToChs(1071)); -// try std.testing.expectEqual(mbr.CHS.init(1, 1, 63), mbr.lbaToChs(1133)); -// try std.testing.expectEqual(mbr.CHS.init(1, 2, 1), mbr.lbaToChs(1134)); -// try std.testing.expectEqual(mbr.CHS.init(1, 15, 63), mbr.lbaToChs(2015)); -// try std.testing.expectEqual(mbr.CHS.init(2, 0, 1), mbr.lbaToChs(2016)); -// try std.testing.expectEqual(mbr.CHS.init(15, 15, 63), mbr.lbaToChs(16127)); -// try std.testing.expectEqual(mbr.CHS.init(16, 0, 1), mbr.lbaToChs(16128)); -// try std.testing.expectEqual(mbr.CHS.init(31, 15, 63), mbr.lbaToChs(32255)); -// try std.testing.expectEqual(mbr.CHS.init(32, 0, 1), mbr.lbaToChs(32256)); -// try std.testing.expectEqual(mbr.CHS.init(16319, 15, 63), mbr.lbaToChs(16450559)); -// try std.testing.expectEqual(mbr.CHS.init(16382, 15, 63), mbr.lbaToChs(16514063)); -// } - -pub const gpt = struct { - pub const Guid = [16]u8; - - pub const Table = struct { - disk_id: Guid, - - partitions: []const Partition, - }; - - pub const Partition = struct { - 
type: Guid, - part_id: Guid, - - offset: ?u64 = null, - size: u64, - - name: [36]u16, - - attributes: Attributes, - - data: Content, - - pub const Attributes = packed struct(u32) { - system: bool, - efi_hidden: bool, - legacy: bool, - read_only: bool, - hidden: bool, - no_automount: bool, - - padding: u26 = 0, - }; - }; - - /// https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs - pub const PartitionType = struct { - pub const unused: Guid = .{}; - - pub const microsoft_basic_data: Guid = .{}; - pub const microsoft_reserved: Guid = .{}; - - pub const windows_recovery: Guid = .{}; - - pub const plan9: Guid = .{}; - - pub const linux_swap: Guid = .{}; - pub const linux_fs: Guid = .{}; - pub const linux_reserved: Guid = .{}; - pub const linux_lvm: Guid = .{}; - }; - - pub fn nameLiteral(comptime name: []const u8) [36]u16 { - return comptime blk: { - var buf: [36]u16 = undefined; - const len = std.unicode.utf8ToUtf16Le(&buf, name) catch |err| @compileError(@tagName(err)); - @memset(buf[len..], 0); - break :blk &buf; - }; - } -}; - pub const FileSystem = struct { pub const Format = union(enum) { pub const Tag = std.meta.Tag(@This()); diff --git a/src/components/EmptyData.zig b/src/components/EmptyData.zig index ccc7c06..1705f0f 100644 --- a/src/components/EmptyData.zig +++ b/src/components/EmptyData.zig @@ -6,6 +6,22 @@ const std = @import("std"); const dim = @import("../dim.zig"); -pub fn execute(ctx: dim.Context) !void { - _ = ctx; +const EmptyData = @This(); + +pub fn parse(ctx: dim.Context) !dim.Content { + const pf = try ctx.alloc_object(EmptyData); + pf.* = .{}; + return .create_handle(pf, .create(@This(), .{ + .guess_size_fn = guess_size, + .render_fn = render, + })); +} + +fn guess_size(_: *EmptyData) dim.Content.GuessError!dim.SizeGuess { + return .{ .at_least = 0 }; +} + +fn render(self: *EmptyData, stream: *dim.BinaryStream) dim.Content.RenderError!void { + _ = self; + _ = stream; } diff --git a/src/components/FillData.zig b/src/components/FillData.zig index 69811e8..37df9cc 100644 --- a/src/components/FillData.zig +++ b/src/components/FillData.zig @@ -5,10 +5,28 @@ const std = @import("std"); const dim = @import("../dim.zig"); -pub fn execute(ctx: dim.Context) !void { - const fill_value: u8 = try ctx.get_integer(u8, 0); +const FillData = @This(); - if (ctx.get_remaining_size()) |size| { - try ctx.writer().writeByteNTimes(fill_value, size); - } +fill_value: u8, + +pub fn parse(ctx: dim.Context) !dim.Content { + const pf = try ctx.alloc_object(FillData); + pf.* = .{ + .fill_value = try ctx.parse_integer(u8, 0), + }; + return .create_handle(pf, .create(@This(), .{ + .guess_size_fn = guess_size, + .render_fn = render, + })); +} + +fn guess_size(_: *FillData) dim.Content.GuessError!dim.SizeGuess { + return .{ .at_least = 0 }; +} + +fn render(self: *FillData, stream: *dim.BinaryStream) dim.Content.RenderError!void { + try stream.writer().writeByteNTimes( + self.fill_value, + stream.capacity, + ); } diff --git a/src/components/PasteFile.zig b/src/components/PasteFile.zig new file mode 100644 index 0000000..01eccf4 --- /dev/null +++ b/src/components/PasteFile.zig @@ -0,0 +1,27 @@ +const std = @import("std"); +const dim = @import("../dim.zig"); + +const PasteFile = @This(); + +file_handle: dim.FileName, + +pub fn parse(ctx: dim.Context) !dim.Content { + const pf = try ctx.alloc_object(PasteFile); + pf.* = .{ + .file_handle = try ctx.parse_file_name(), + }; + return .create_handle(pf, .create(@This(), .{ + .guess_size_fn = guess_size, + .render_fn = render, + })); +} + +fn 
guess_size(self: *PasteFile) dim.Content.GuessError!dim.SizeGuess { + const size = try self.file_handle.get_size(); + + return .{ .exact = size }; +} + +fn render(self: *PasteFile, stream: *dim.BinaryStream) dim.Content.RenderError!void { + try self.file_handle.copy_to(stream); +} diff --git a/src/components/RawData.zig b/src/components/RawData.zig deleted file mode 100644 index 88ad715..0000000 --- a/src/components/RawData.zig +++ /dev/null @@ -1,20 +0,0 @@ -const std = @import("std"); -const dim = @import("../dim.zig"); - -pub fn execute(ctx: dim.Context) !void { - const path = try ctx.get_string(); - - var file = try ctx.open_file(path); - defer file.close(); - - if (ctx.get_remaining_size()) |available_size| { - const stat = try file.stat(); - - if (available_size < stat.size) - return error.InsufficientSize; // TODO: Error reporting - } - - var fifo: std.fifo.LinearFifo(u8, .{ .Static = 8192 }) = .init(); - - try fifo.pump(file.reader(), ctx.writer()); -} diff --git a/src/components/part/GptPartitionTable.zig b/src/components/part/GptPartitionTable.zig index 85f1cc8..0252037 100644 --- a/src/components/part/GptPartitionTable.zig +++ b/src/components/part/GptPartitionTable.zig @@ -5,3 +5,64 @@ pub fn execute(ctx: dim.Context) !void { _ = ctx; @panic("gpt-part not implemented yet!"); } + +pub const gpt = struct { + pub const Guid = [16]u8; + + pub const Table = struct { + disk_id: Guid, + + partitions: []const Partition, + }; + + pub const Partition = struct { + type: Guid, + part_id: Guid, + + offset: ?u64 = null, + size: u64, + + name: [36]u16, + + attributes: Attributes, + + // data: Content, + + pub const Attributes = packed struct(u32) { + system: bool, + efi_hidden: bool, + legacy: bool, + read_only: bool, + hidden: bool, + no_automount: bool, + + padding: u26 = 0, + }; + }; + + /// https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs + pub const PartitionType = struct { + pub const unused: Guid = .{}; + + pub const microsoft_basic_data: Guid = .{}; + pub const microsoft_reserved: Guid = .{}; + + pub const windows_recovery: Guid = .{}; + + pub const plan9: Guid = .{}; + + pub const linux_swap: Guid = .{}; + pub const linux_fs: Guid = .{}; + pub const linux_reserved: Guid = .{}; + pub const linux_lvm: Guid = .{}; + }; + + pub fn nameLiteral(comptime name: []const u8) [36]u16 { + return comptime blk: { + var buf: [36]u16 = undefined; + const len = std.unicode.utf8ToUtf16Le(&buf, name) catch |err| @compileError(@tagName(err)); + @memset(buf[len..], 0); + break :blk &buf; + }; + } +}; diff --git a/src/components/part/MbrPartitionTable.zig b/src/components/part/MbrPartitionTable.zig index 0b1c691..3febf71 100644 --- a/src/components/part/MbrPartitionTable.zig +++ b/src/components/part/MbrPartitionTable.zig @@ -1,7 +1,348 @@ +//! +//! The `mbr-part` content will assembly a managed boot record partition table. +//! +//! 
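+//! A minimal script using this content type might look like the following
+//! sketch (modelled on `tests/part/mbr/minimal.dis` and `concept/script.dis`;
+//! the bootloader path is only a placeholder):
+//!
+//!     mbr-part
+//!         bootloader paste-file ./boot.bin
+//!         ignore # partition 1
+//!         ignore # partition 2
+//!         ignore # partition 3
+//!         ignore # partition 4
+//!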
const std = @import("std"); const dim = @import("../../dim.zig"); -pub fn execute(ctx: dim.Context) !void { - _ = ctx; - @panic("mbr-part not implemented yet!"); +const PartTable = @This(); + +bootloader: ?dim.Content, +disk_id: ?u32, +partitions: [4]?Partition, + +pub fn parse(ctx: dim.Context) !dim.Content { + const pf = try ctx.alloc_object(PartTable); + pf.* = .{ + .bootloader = null, + .disk_id = null, + .partitions = .{ + null, + null, + null, + null, + }, + }; + + var next_part_id: usize = 0; + while (next_part_id < pf.partitions.len) { + const kw = try ctx.parse_enum(enum { + bootloader, + part, + ignore, + }); + switch (kw) { + .bootloader => { + const bootloader_content = try ctx.parse_content(); + if (pf.bootloader != null) { + try ctx.report_nonfatal_error("mbr-part.bootloader specified twice!", .{}); + } + pf.bootloader = bootloader_content; + }, + .ignore => { + pf.partitions[next_part_id] = .unused; + next_part_id += 1; + }, + .part => { + pf.partitions[next_part_id] = try parse_partition(ctx); + next_part_id += 1; + }, + } + } + + return .create_handle(pf, .create(@This(), .{ + .guess_size_fn = guess_size, + .render_fn = render, + })); +} + +fn parse_partition(ctx: dim.Context) !Partition { + var part: Partition = .{ + .offset = null, + .size = null, + .bootable = false, + .type = .empty, + .data = .empty, + }; + + var updater: dim.FieldUpdater(Partition, &.{ + .offset, + .size, + .bootable, + }) = .init(ctx, &part); + + parse_loop: while (true) { + const kw = try ctx.parse_enum(enum { + type, + size, + bootable, + contents, + endpart, + }); + switch (kw) { + .type => updater.set(.type, try ctx.parse_enum(PartitionType)), + .bootable => updater.set(.bootable, true), + .size => updater.set(.size, try ctx.parse_mem_size()), + .offset => updater.set(.offset, try ctx.parse_mem_size()), + .contents => updater.set(.contents, try ctx.parse_content()), + .endpart => break :parse_loop, + } + } + + try updater.validate(); + + return part; +} + +fn guess_size(self: *PartTable) dim.Content.GuessError!dim.SizeGuess { + _ = self; + @panic("not implemented yet!"); } + +fn render(self: *PartTable, stream: *dim.BinaryStream) dim.Content.RenderError!void { + _ = self; + _ = stream; +} + +// .mbr => |table| { // MbrTable +// { +// var boot_sector: [512]u8 = .{0} ** 512; + +// @memcpy(boot_sector[0..table.bootloader.len], &table.bootloader); + +// std.mem.writeInt(u32, boot_sector[0x1B8..0x1BC], if (table.disk_id) |disk_id| disk_id else 0x0000_0000, .little); +// std.mem.writeInt(u16, boot_sector[0x1BC..0x1BE], 0x0000, .little); + +// var all_auto = true; +// var all_manual = true; +// for (table.partitions) |part_or_null| { +// const part = part_or_null orelse continue; + +// if (part.offset != null) { +// all_auto = false; +// } else { +// all_manual = false; +// } +// } + +// if (!all_auto and !all_manual) { +// std.log.err("{s}: not all partitions have an explicit offset!", .{context.slice()}); +// return error.InvalidSectorBoundary; +// } + +// const part_base = 0x01BE; +// var auto_offset: u64 = 2048; +// for (table.partitions, 0..) 
|part_or_null, part_id| { +// const reset_len = context.len; +// defer context.len = reset_len; + +// var buffer: [64]u8 = undefined; +// context.appendSliceAssumeCapacity(std.fmt.bufPrint(&buffer, "[{}]", .{part_id}) catch unreachable); + +// const desc = boot_sector[part_base + 16 * part_id ..][0..16]; + +// if (part_or_null) |part| { +// // https://wiki.osdev.org/MBR#Partition_table_entry_format + +// const part_offset = part.offset orelse auto_offset; + +// if ((part_offset % 512) != 0) { +// std.log.err("{s}: .offset is not divisible by 512!", .{context.slice()}); +// return error.InvalidSectorBoundary; +// } +// if ((part.size % 512) != 0) { +// std.log.err("{s}: .size is not divisible by 512!", .{context.slice()}); +// return error.InvalidSectorBoundary; +// } + +// const lba_u64 = @divExact(part_offset, 512); +// const size_u64 = @divExact(part.size, 512); + +// const lba = std.math.cast(u32, lba_u64) orelse { +// std.log.err("{s}: .offset is out of bounds!", .{context.slice()}); +// return error.InvalidSectorBoundary; +// }; +// const size = std.math.cast(u32, size_u64) orelse { +// std.log.err("{s}: .size is out of bounds!", .{context.slice()}); +// return error.InvalidSectorBoundary; +// }; + +// desc[0] = if (part.bootable) 0x80 else 0x00; + +// desc[1..4].* = mbr.encodeMbrChsEntry(lba); // chs_start +// desc[4] = @intFromEnum(part.type); +// desc[5..8].* = mbr.encodeMbrChsEntry(lba + size - 1); // chs_end +// std.mem.writeInt(u32, desc[8..12], lba, .little); // lba_start +// std.mem.writeInt(u32, desc[12..16], size, .little); // block_count + +// auto_offset += part.size; +// } else { +// @memset(desc, 0); // inactive +// } +// } +// boot_sector[0x01FE] = 0x55; +// boot_sector[0x01FF] = 0xAA; + +// try disk.handle.writeAll(&boot_sector); +// } + +// { +// var auto_offset: u64 = 2048; +// for (table.partitions, 0..) |part_or_null, part_id| { +// const part = part_or_null orelse continue; + +// const reset_len = context.len; +// defer context.len = reset_len; + +// var buffer: [64]u8 = undefined; +// context.appendSliceAssumeCapacity(std.fmt.bufPrint(&buffer, "[{}]", .{part_id}) catch unreachable); + +// try writeDiskImage(b, asking, disk, base + auto_offset, part.size, part.data, context); + +// auto_offset += part.size; +// } +// } +// }, + +pub const Partition = struct { + pub const unused: Partition = .{ + .offset = null, + .size = 0, + .bootable = false, + .type = .empty, + .data = undefined, + }; + + offset: ?u64 = null, + size: ?u64, + + bootable: bool, + type: PartitionType, + + data: dim.Content, +}; + +/// https://en.wikipedia.org/wiki/Partition_type +pub const PartitionType = enum(u8) { + empty = 0x00, + + fat12 = 0x01, + ntfs = 0x07, + + fat32_chs = 0x0B, + fat32_lba = 0x0C, + + fat16_lba = 0x0E, + + linux_swap = 0x82, + linux_fs = 0x83, + linux_lvm = 0x8E, + + // Output from fdisk (util-linux 2.38.1) + // 00 Leer 27 Verst. NTFS Win 82 Linux Swap / So c1 DRDOS/sec (FAT- + // 01 FAT12 39 Plan 9 83 Linux c4 DRDOS/sec (FAT- + // 02 XENIX root 3c PartitionMagic 84 versteckte OS/2 c6 DRDOS/sec (FAT- + // 03 XENIX usr 40 Venix 80286 85 Linux erweitert c7 Syrinx + // 04 FAT16 <32M 41 PPC PReP Boot 86 NTFS Datenträge da Keine Dateisyst + // 05 Erweiterte 42 SFS 87 NTFS Datenträge db CP/M / CTOS / . + // 06 FAT16 4d QNX4.x 88 Linux Klartext de Dell Dienstprog + // 07 HPFS/NTFS/exFAT 4e QNX4.x 2. Teil 8e Linux LVM df BootIt + // 08 AIX 4f QNX4.x 3. 
Teil 93 Amoeba e1 DOS-Zugriff + // 09 AIX bootfähig 50 OnTrack DM 94 Amoeba BBT e3 DOS R/O + // 0a OS/2-Bootmanage 51 OnTrack DM6 Aux 9f BSD/OS e4 SpeedStor + // 0b W95 FAT32 52 CP/M a0 IBM Thinkpad Ru ea Linux erweitert + // 0c W95 FAT32 (LBA) 53 OnTrack DM6 Aux a5 FreeBSD eb BeOS Dateisyste + // 0e W95 FAT16 (LBA) 54 OnTrackDM6 a6 OpenBSD ee GPT + // 0f W95 Erw. (LBA) 55 EZ-Drive a7 NeXTSTEP ef EFI (FAT-12/16/ + // 10 OPUS 56 Golden Bow a8 Darwin UFS f0 Linux/PA-RISC B + // 11 Verst. FAT12 5c Priam Edisk a9 NetBSD f1 SpeedStor + // 12 Compaq Diagnost 61 SpeedStor ab Darwin Boot f4 SpeedStor + // 14 Verst. FAT16 <3 63 GNU HURD oder S af HFS / HFS+ f2 DOS sekundär + // 16 Verst. FAT16 64 Novell Netware b7 BSDi Dateisyste f8 EBBR geschützt + // 17 Verst. HPFS/NTF 65 Novell Netware b8 BSDI Swap fb VMware VMFS + // 18 AST SmartSleep 70 DiskSecure Mult bb Boot-Assistent fc VMware VMKCORE + // 1b Verst. W95 FAT3 75 PC/IX bc Acronis FAT32 L fd Linux RAID-Auto + // 1c Verst. W95 FAT3 80 Altes Minix be Solaris Boot fe LANstep + // 1e Verst. W95 FAT1 81 Minix / altes L bf Solaris ff BBT + // 24 NEC DOS + + _, +}; + +pub fn encodeMbrChsEntry(lba: u32) [3]u8 { + var chs = lbaToChs(lba); + + if (chs.cylinder >= 1024) { + chs = .{ + .cylinder = 1023, + .head = 255, + .sector = 63, + }; + } + + const cyl: u10 = @intCast(chs.cylinder); + const head: u8 = @intCast(chs.head); + const sect: u6 = @intCast(chs.sector); + + const sect_cyl: u8 = @as(u8, 0xC0) & @as(u8, @truncate(cyl >> 2)) + sect; + const sect_8: u8 = @truncate(cyl); + + return .{ head, sect_cyl, sect_8 }; +} + +const CHS = struct { + cylinder: u32, + head: u8, // limit: 256 + sector: u6, // limit: 64 + + pub fn init(c: u32, h: u8, s: u6) CHS { + return .{ .cylinder = c, .head = h, .sector = s }; + } +}; + +pub fn lbaToChs(lba: u32) CHS { + const hpc = 255; + const spt = 63; + + // C, H and S are the cylinder number, the head number, and the sector number + // LBA is the logical block address + // HPC is the maximum number of heads per cylinder (reported by disk drive, typically 16 for 28-bit LBA) + // SPT is the maximum number of sectors per track (reported by disk drive, typically 63 for 28-bit LBA) + // LBA = (C * HPC + H) * SPT + (S - 1) + + const sector = (lba % spt); + const cyl_head = (lba / spt); + + const head = (cyl_head % hpc); + const cyl = (cyl_head / hpc); + + return CHS{ + .sector = @intCast(sector + 1), + .head = @intCast(head), + .cylinder = cyl, + }; +} + +// test "lba to chs" { +// // table from https://en.wikipedia.org/wiki/Logical_block_addressing#CHS_conversion +// try std.testing.expectEqual(mbr.CHS.init(0, 0, 1), mbr.lbaToChs(0)); +// try std.testing.expectEqual(mbr.CHS.init(0, 0, 2), mbr.lbaToChs(1)); +// try std.testing.expectEqual(mbr.CHS.init(0, 0, 3), mbr.lbaToChs(2)); +// try std.testing.expectEqual(mbr.CHS.init(0, 0, 63), mbr.lbaToChs(62)); +// try std.testing.expectEqual(mbr.CHS.init(0, 1, 1), mbr.lbaToChs(63)); +// try std.testing.expectEqual(mbr.CHS.init(0, 15, 1), mbr.lbaToChs(945)); +// try std.testing.expectEqual(mbr.CHS.init(0, 15, 63), mbr.lbaToChs(1007)); +// try std.testing.expectEqual(mbr.CHS.init(1, 0, 1), mbr.lbaToChs(1008)); +// try std.testing.expectEqual(mbr.CHS.init(1, 0, 63), mbr.lbaToChs(1070)); +// try std.testing.expectEqual(mbr.CHS.init(1, 1, 1), mbr.lbaToChs(1071)); +// try std.testing.expectEqual(mbr.CHS.init(1, 1, 63), mbr.lbaToChs(1133)); +// try std.testing.expectEqual(mbr.CHS.init(1, 2, 1), mbr.lbaToChs(1134)); +// try std.testing.expectEqual(mbr.CHS.init(1, 15, 63), 
mbr.lbaToChs(2015)); +// try std.testing.expectEqual(mbr.CHS.init(2, 0, 1), mbr.lbaToChs(2016)); +// try std.testing.expectEqual(mbr.CHS.init(15, 15, 63), mbr.lbaToChs(16127)); +// try std.testing.expectEqual(mbr.CHS.init(16, 0, 1), mbr.lbaToChs(16128)); +// try std.testing.expectEqual(mbr.CHS.init(31, 15, 63), mbr.lbaToChs(32255)); +// try std.testing.expectEqual(mbr.CHS.init(32, 0, 1), mbr.lbaToChs(32256)); +// try std.testing.expectEqual(mbr.CHS.init(16319, 15, 63), mbr.lbaToChs(16450559)); +// try std.testing.expectEqual(mbr.CHS.init(16382, 15, 63), mbr.lbaToChs(16514063)); +// } diff --git a/src/dim.zig b/src/dim.zig index 0a5fb75..f2a942c 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -85,11 +85,12 @@ pub fn main() !u8 { const script_source = try current_dir.readFileAlloc(gpa, script_path, max_script_size); defer gpa.free(script_source); - var output_file = try current_dir.atomicFile(output_path, .{}); - defer output_file.deinit(); + var mem_arena: std.heap.ArenaAllocator = .init(gpa); + defer mem_arena.deinit(); var env = Environment{ .allocator = gpa, + .arena = mem_arena.allocator(), .vars = &var_map, .include_base = current_dir, .parser = undefined, @@ -111,9 +112,34 @@ pub fn main() !u8 { .contents = script_source, }); - try env.execute_content(&parser); + const root_content: Content = env.parse_content() catch |err| switch (err) { + error.FatalConfigError => return 1, - try output_file.finish(); + else => |e| return e, + }; + + if (env.error_flag) { + return 1; + } + + { + var output_file = try current_dir.atomicFile(output_path, .{}); + defer output_file.deinit(); + + const size_limit: ?u64 = if (options.size) |disk_size| blk: { + try output_file.file.setEndPos(disk_size.size_in_bytes()); + + break :blk disk_size.size_in_bytes(); + } else null; + + var stream = BinaryStream{ + .capacity = size_limit orelse 0, + }; + + try root_content.render(&stream); + + try output_file.finish(); + } return 0; } @@ -126,9 +152,9 @@ fn fatal(msg: []const u8) noreturn { const content_types: []const struct { []const u8, type } = &.{ .{ "mbr-part", @import("components/part/MbrPartitionTable.zig") }, - .{ "gpt-part", @import("components/part/GptPartitionTable.zig") }, - .{ "fat", @import("components/fs/FatFileSystem.zig") }, - .{ "raw", @import("components/RawData.zig") }, + // .{ "gpt-part", @import("components/part/GptPartitionTable.zig") }, + // .{ "fat", @import("components/fs/FatFileSystem.zig") }, + .{ "paste-file", @import("components/PasteFile.zig") }, .{ "empty", @import("components/EmptyData.zig") }, .{ "fill", @import("components/FillData.zig") }, }; @@ -136,79 +162,157 @@ const content_types: []const struct { []const u8, type } = &.{ pub const Context = struct { env: *Environment, - pub const WriteError = error{}; - pub const Writer = std.io.Writer(*const Context, WriteError, write_some_data); - - pub fn get_remaining_size(ctx: Context) ?u64 { - _ = ctx; - - // TODO: This - return null; + pub fn alloc_object(ctx: Context, comptime T: type) error{OutOfMemory}!*T { + return try ctx.env.arena.create(T); } - pub fn open_file(ctx: Context, path: []const u8) !std.fs.File { - const abs_path = try ctx.env.parser.get_include_path(ctx.env.allocator, path); - defer ctx.env.allocator.free(abs_path); - - return ctx.env.include_base.openFile(abs_path, .{}); + pub fn report_nonfatal_error(ctx: Context, comptime msg: []const u8, params: anytype) error{OutOfMemory}!void { + try ctx.env.report_error(msg, params); } - pub fn writer(ctx: *const Context) Writer { - return .{ .context = ctx }; + pub fn 
report_fatal_error(ctx: Context, comptime msg: []const u8, params: anytype) error{ FatalConfigError, OutOfMemory } { + try ctx.env.report_error(msg, params); + return error.FatalConfigError; } - pub fn get_string(ctx: Context) ![]const u8 { + pub fn parse_string(ctx: Context) Environment.ParseError![]const u8 { return ctx.env.parser.next(); } - pub fn get_enum(ctx: Context, comptime E: type) !E { + pub fn parse_file_name(ctx: Context) Environment.ParseError!FileName { + const rel_path = try ctx.parse_string(); + + const abs_path = try ctx.env.parser.get_include_path(ctx.env.arena, rel_path); + + return .{ + .root_dir = ctx.env.include_base, + .rel_path = abs_path, + }; + } + + pub fn parse_enum(ctx: Context, comptime E: type) Environment.ParseError!E { if (@typeInfo(E) != .@"enum") @compileError("get_enum requires an enum type!"); return std.meta.stringToEnum( E, - ctx.get_string(), + try ctx.parse_string(), ) orelse return error.InvalidEnumTag; } - pub fn get_integer(ctx: Context, comptime I: type, base: u8) !I { + pub fn parse_integer(ctx: Context, comptime I: type, base: u8) Environment.ParseError!I { if (@typeInfo(I) != .int) @compileError("get_integer requires an integer type!"); - return try std.fmt.parseInt( + return std.fmt.parseInt( I, - try ctx.get_string(), + try ctx.parse_string(), base, - ); + ) catch return error.InvalidNumber; } - fn write_some_data(ctx: *const Context, buffer: []const u8) WriteError!usize { - _ = ctx; - // TODO: Implement this! - return buffer.len; + pub fn parse_mem_size(ctx: Context) Environment.ParseError!u64 { + const str = try ctx.parse_string(); + + const ds: DiskSize = try .parse(str); + + return ds.size_in_bytes(); + } + + pub fn parse_content(ctx: Context) Environment.ParseError!Content { + const content_type_str = try ctx.env.parser.next(); + + inline for (content_types) |tn| { + const name, const impl = tn; + + if (std.mem.eql(u8, name, content_type_str)) { + const content: Content = try impl.parse(ctx); + + return content; + } + } + + return ctx.report_fatal_error("unknown content type: '{}'", .{ + std.zig.fmtEscapes(content_type_str), + }); } }; +pub fn FieldUpdater(comptime Obj: type, comptime optional_fields: []const std.meta.FieldEnum(Obj)) type { + return struct { + const FUP = @This(); + const FieldName = std.meta.FieldEnum(Obj); + + ctx: Context, + target: *Obj, + + updated_fields: std.EnumSet(FieldName) = .initEmpty(), + + pub fn init(ctx: Context, target: *Obj) FUP { + return .{ + .ctx = ctx, + .target = target, + }; + } + + pub fn set(fup: *FUP, comptime field: FieldName, value: @FieldType(Obj, @tagName(field))) !void { + if (fup.updated_fields.contains(field)) { + fup.ctx.report_nonfatal_error("duplicate assignment of {s}.{s}", .{ + @typeName(Obj), + @tagName(field), + }); + } + + @field(fup.target, @tagName(field)) = value; + fup.updated_fields.insert(field); + } + + pub fn validate(fup: FUP) !void { + var missing_fields = fup.updated_fields; + for (optional_fields) |fld| { + missing_fields.insert(fld); + } + missing_fields = missing_fields.complement(); + var iter = missing_fields.iterator(); + while (iter.next()) |fld| { + fup.ctx.report_nonfatal_error("missing assignment of {s}.{s}", .{ + @typeName(Obj), + @tagName(fld), + }); + } + } + }; +} + const Environment = struct { + const ParseError = Parser.Error || error{ + OutOfMemory, + UnexpectedEndOfFile, + InvalidNumber, + UnknownContentType, + FatalConfigError, + InvalidEnumTag, + }; + + arena: std.mem.Allocator, allocator: std.mem.Allocator, parser: *Parser, include_base: 
std.fs.Dir, vars: *const VariableMap, + error_flag: bool = false, io: Parser.IO = .{ .fetch_file_fn = fetch_file, .resolve_variable_fn = resolve_var, }, - fn execute_content(env: *Environment, parser: *Parser) !void { - const content_type_str = try parser.next(); + fn parse_content(env: *Environment) ParseError!Content { + var ctx = Context{ .env = env }; - inline for (content_types) |tn| { - const name, const impl = tn; + return try ctx.parse_content(); + } - if (std.mem.eql(u8, name, content_type_str)) { - return impl.execute(Context{ .env = env }); - } - } - return error.UnknownContentType; + fn report_error(env: *Environment, comptime fmt: []const u8, params: anytype) error{OutOfMemory}!void { + env.error_flag = true; + std.log.err("PARSE ERROR: " ++ fmt, params); } fn fetch_file(io: *const Parser.IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8 { @@ -226,6 +330,221 @@ const Environment = struct { } }; +/// A "Content" is something that will fill a given space of a disk image. +/// It can be raw data, a pattern, a file system, a partition table, ... +/// +/// +pub const Content = struct { + pub const RenderError = FileName.OpenError || FileHandle.ReadError || error{WriteError}; + pub const GuessError = FileName.GetSizeError; + + obj: *anyopaque, + vtable: *const VTable, + + pub fn create_handle(obj: *anyopaque, vtable: *const VTable) Content { + return .{ .obj = obj, .vtable = vtable }; + } + + /// Emits the content into a binary stream. + pub fn render(content: Content, stream: *BinaryStream) RenderError!void { + try content.vtable.render_fn(content.obj, stream); + } + + /// Attempts to determine the required size of the content. + /// + /// This may not be an exact guess, so the result can have + pub fn guess_required_size(content: Content) GuessError!SizeGuess { + return try content.vtable.guess_size_fn(content.obj); + } + + pub const VTable = struct { + render_fn: *const fn (*anyopaque, *BinaryStream) RenderError!void, + guess_size_fn: *const fn (*anyopaque) GuessError!SizeGuess, + + pub fn create(comptime Container: type, comptime funcs: struct { + render_fn: *const fn (*Container, *BinaryStream) RenderError!void, + guess_size_fn: *const fn (*Container) GuessError!SizeGuess, + }) *const VTable { + const Wrap = struct { + fn render(self: *anyopaque, stream: *BinaryStream) RenderError!void { + return funcs.render_fn( + @ptrCast(@alignCast(self)), + stream, + ); + } + fn guess_size(self: *anyopaque) GuessError!SizeGuess { + return funcs.guess_size_fn( + @ptrCast(@alignCast(self)), + ); + } + }; + return comptime &.{ + .render_fn = Wrap.render, + .guess_size_fn = Wrap.guess_size, + }; + } + }; +}; + +pub const FileName = struct { + root_dir: std.fs.Dir, + rel_path: []const u8, + + pub const OpenError = error{ FileNotFound, InvalidPath, IoError }; + + pub fn open(name: FileName) OpenError!FileHandle { + const file = name.root_dir.openFile(name.rel_path, .{}) catch |err| switch (err) { + error.FileNotFound => return error.FileNotFound, + + error.NameTooLong, + error.InvalidWtf8, + error.BadPathName, + error.InvalidUtf8, + => return error.InvalidPath, + + error.NoSpaceLeft, + error.FileTooBig, + error.DeviceBusy, + error.AccessDenied, + error.SystemResources, + error.WouldBlock, + error.NoDevice, + error.Unexpected, + error.SharingViolation, + error.PathAlreadyExists, + error.PipeBusy, + error.NetworkNotFound, + error.AntivirusInterference, + error.SymLinkLoop, + error.ProcessFdQuotaExceeded, + error.SystemFdQuotaExceeded, + 
error.IsDir, + error.NotDir, + error.FileLocksNotSupported, + error.FileBusy, + => return error.IoError, + }; + return .{ .file = file }; + } + + pub const GetSizeError = error{ FileNotFound, InvalidPath, IoError }; + pub fn get_size(name: FileName) GetSizeError!u64 { + const stat = name.root_dir.statFile(name.rel_path) catch |err| switch (err) { + error.FileNotFound => return error.FileNotFound, + + error.NameTooLong, + error.InvalidWtf8, + error.BadPathName, + error.InvalidUtf8, + => return error.InvalidPath, + + error.NoSpaceLeft, + error.FileTooBig, + error.DeviceBusy, + error.AccessDenied, + error.SystemResources, + error.WouldBlock, + error.NoDevice, + error.Unexpected, + error.SharingViolation, + error.PathAlreadyExists, + error.PipeBusy, + + error.NetworkNotFound, + error.AntivirusInterference, + error.SymLinkLoop, + error.ProcessFdQuotaExceeded, + error.SystemFdQuotaExceeded, + error.IsDir, + error.NotDir, + error.FileLocksNotSupported, + error.FileBusy, + => return error.IoError, + }; + return stat.size; + } + + pub fn copy_to(file: FileName, stream: *BinaryStream) (OpenError || FileHandle.ReadError || BinaryStream.WriteError)!void { + var handle = try file.open(); + defer handle.close(); + + var fifo: std.fifo.LinearFifo(u8, .{ .Static = 8192 }) = .init(); + + try fifo.pump( + handle.reader(), + stream.writer(), + ); + } +}; + +pub const FileHandle = struct { + pub const ReadError = error{ReadFileFailed}; + + pub const Reader = std.io.Reader(std.fs.File, ReadError, read_some); + + file: std.fs.File, + + pub fn close(fd: *FileHandle) void { + fd.file.close(); + fd.* = undefined; + } + + pub fn reader(fd: FileHandle) Reader { + return .{ .context = fd.file }; + } + + fn read_some(file: std.fs.File, data: []u8) ReadError!usize { + return file.read(data) catch |err| switch (err) { + error.InputOutput, + error.AccessDenied, + error.BrokenPipe, + error.SystemResources, + error.OperationAborted, + error.LockViolation, + error.WouldBlock, + error.ConnectionResetByPeer, + error.ProcessNotFound, + error.Unexpected, + error.IsDir, + error.ConnectionTimedOut, + error.NotOpenForReading, + error.SocketNotConnected, + error.Canceled, + => return error.ReadFileFailed, + }; + } +}; + +pub const SizeGuess = union(enum) { + unknown, + exact: u64, + at_least: u64, + at_most: u64, +}; + +pub const BinaryStream = struct { + pub const WriteError = error{IoError}; + pub const Writer = std.io.Writer(*BinaryStream, WriteError, write_some); + + /// Max number of bytes that can be written + capacity: u64, + + pub fn writer(bs: *BinaryStream) Writer { + return .{ .context = bs }; + } + + fn write_some(stream: *BinaryStream, data: []const u8) WriteError!usize { + _ = stream; + // TODO: Implement write_some! 
+ + // std.debug.print("dummy write of '{}'\n", .{ + // std.fmt.fmtSliceHexUpper(data), + // }); + + return data.len; + } +}; + test { _ = Tokenizer; _ = Parser; diff --git a/tests/basic/raw.dis b/tests/basic/raw.dis index 094ac08..8b1cb37 100644 --- a/tests/basic/raw.dis +++ b/tests/basic/raw.dis @@ -1 +1 @@ -raw ./raw.dis +paste-file ./raw.dis From 16292924349cd77f9990ead46bf4b156c16c6b70 Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki <der.teufel.mail@gmail.com> Date: Sun, 9 Mar 2025 14:55:46 +0100 Subject: [PATCH 08/26] Make MbrPartitionTable compile --- src/components/part/MbrPartitionTable.zig | 11 +++---- src/dim.zig | 35 +++++++++++++++++++---- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/src/components/part/MbrPartitionTable.zig b/src/components/part/MbrPartitionTable.zig index 3febf71..958afc7 100644 --- a/src/components/part/MbrPartitionTable.zig +++ b/src/components/part/MbrPartitionTable.zig @@ -50,7 +50,7 @@ pub fn parse(ctx: dim.Context) !dim.Content { } } - return .create_handle(pf, .create(@This(), .{ + return .create_handle(pf, .create(PartTable, .{ .guess_size_fn = guess_size, .render_fn = render, })); @@ -74,19 +74,20 @@ fn parse_partition(ctx: dim.Context) !Partition { parse_loop: while (true) { const kw = try ctx.parse_enum(enum { type, - size, bootable, + size, + offset, contents, endpart, }); - switch (kw) { + try switch (kw) { .type => updater.set(.type, try ctx.parse_enum(PartitionType)), .bootable => updater.set(.bootable, true), .size => updater.set(.size, try ctx.parse_mem_size()), .offset => updater.set(.offset, try ctx.parse_mem_size()), - .contents => updater.set(.contents, try ctx.parse_content()), + .contents => updater.set(.data, try ctx.parse_content()), .endpart => break :parse_loop, - } + }; } try updater.validate(); diff --git a/src/dim.zig b/src/dim.zig index f2a942c..aa2cef6 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -255,7 +255,7 @@ pub fn FieldUpdater(comptime Obj: type, comptime optional_fields: []const std.me pub fn set(fup: *FUP, comptime field: FieldName, value: @FieldType(Obj, @tagName(field))) !void { if (fup.updated_fields.contains(field)) { - fup.ctx.report_nonfatal_error("duplicate assignment of {s}.{s}", .{ + try fup.ctx.report_nonfatal_error("duplicate assignment of {s}.{s}", .{ @typeName(Obj), @tagName(field), }); @@ -273,7 +273,7 @@ pub fn FieldUpdater(comptime Obj: type, comptime optional_fields: []const std.me missing_fields = missing_fields.complement(); var iter = missing_fields.iterator(); while (iter.next()) |fld| { - fup.ctx.report_nonfatal_error("missing assignment of {s}.{s}", .{ + try fup.ctx.report_nonfatal_error("missing assignment of {s}.{s}", .{ @typeName(Obj), @tagName(fld), }); @@ -290,6 +290,8 @@ const Environment = struct { UnknownContentType, FatalConfigError, InvalidEnumTag, + Overflow, + InvalidSize, }; arena: std.mem.Allocator, @@ -341,6 +343,24 @@ pub const Content = struct { obj: *anyopaque, vtable: *const VTable, + pub const empty: Content = .{ + .obj = undefined, + .vtable = &emptyVTable, + }; + + const emptyVTable: VTable = blk: { + const Wrap = struct { + fn render(_: *anyopaque, _: *BinaryStream) RenderError!void {} + fn guess_size_fn(_: *anyopaque) GuessError!SizeGuess { + return .{ .exact = 0 }; + } + }; + break :blk .{ + .render_fn = Wrap.render, + .guess_size_fn = Wrap.guess_size_fn, + }; + }; + pub fn create_handle(obj: *anyopaque, vtable: *const VTable) Content { return .{ .obj = obj, .vtable = vtable }; } @@ -361,10 +381,13 @@ pub const Content = struct { render_fn: *const fn 
(*anyopaque, *BinaryStream) RenderError!void, guess_size_fn: *const fn (*anyopaque) GuessError!SizeGuess, - pub fn create(comptime Container: type, comptime funcs: struct { - render_fn: *const fn (*Container, *BinaryStream) RenderError!void, - guess_size_fn: *const fn (*Container) GuessError!SizeGuess, - }) *const VTable { + pub fn create( + comptime Container: type, + comptime funcs: struct { + render_fn: *const fn (*Container, *BinaryStream) RenderError!void, + guess_size_fn: *const fn (*Container) GuessError!SizeGuess, + }, + ) *const VTable { const Wrap = struct { fn render(self: *anyopaque, stream: *BinaryStream) RenderError!void { return funcs.render_fn( From 7ddee31c5634c34906ce29736109ee7c2d5f9c48 Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki <der.teufel.mail@gmail.com> Date: Sun, 9 Mar 2025 16:30:27 +0100 Subject: [PATCH 09/26] Remove demo step from CI; add running tests to CI --- .github/workflows/validate.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml index 89194e8..ce685bf 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/validate.yml @@ -22,6 +22,6 @@ jobs: run: | zig build - - name: Compile and run demo + - name: Compile and run tests run: | - zig build install debug + zig build test From 86a3ba53024249b307b2533e73ae59124f63fb5f Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki <der.teufel.mail@gmail.com> Date: Sun, 9 Mar 2025 19:54:31 +0100 Subject: [PATCH 10/26] WIP: Behaviour tests in build.zig --- build.zig | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/build.zig b/build.zig index 1a5293f..9f078e2 100644 --- a/build.zig +++ b/build.zig @@ -51,4 +51,37 @@ pub fn build(b: *std.Build) void { }); const run_dim_tests = b.addRunArtifact(dim_tests); test_step.dependOn(&run_dim_tests.step); + + const behaviour_tests_step = b.step("behaviour", "Run all behaviour tests"); + for (behaviour_tests) |script| { + const step_name = b.dupe(script); + std.mem.replaceScalar(u8, step_name, '/', '-'); + const script_test = b.step(step_name, b.fmt("Run {s} behaviour test", .{script})); + + const run_sizeless = b.addRunArtifact(dim_exe); + run_sizeless.addArg("--output"); + _ = run_sizeless.addOutputFileArg("disk.img"); + run_sizeless.addArg("--script"); + run_sizeless.addFileArg(b.path(script)); + script_test.dependOn(&run_sizeless.step); + + const run_with_size = b.addRunArtifact(dim_exe); + run_with_size.addArg("--output"); + _ = run_with_size.addOutputFileArg("disk.img"); + run_with_size.addArg("--script"); + run_with_size.addFileArg(b.path(script)); + run_with_size.addArgs(&.{ "--size", "30M" }); + script_test.dependOn(&run_with_size.step); + + behaviour_tests_step.dependOn(script_test); + } } + +const behaviour_tests: []const []const u8 = &.{ + "tests/basic/empty.dis", + "tests/basic/fill-0x00.dis", + "tests/basic/fill-0xAA.dis", + "tests/basic/fill-0xFF.dis", + "tests/basic/raw.dis", + "tests/part/mbr/minimal.dis", +}; From c051270f904f4948d9fb788fbe0d6876a492ab54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Mon, 10 Mar 2025 13:42:25 +0100 Subject: [PATCH 11/26] More work on the MBR part table --- justfile | 5 ++- src/components/part/MbrPartitionTable.zig | 46 +++++++++++++++++--- src/dim.zig | 4 +- tests/part/mbr/basic-single-part-sized.dis | 9 ++++ tests/part/mbr/basic-single-part-unsized.dis | 8 ++++ tests/part/mbr/no-part-bootloader.dis | 6 +++ 6 files changed, 70 
insertions(+), 8 deletions(-) create mode 100644 tests/part/mbr/basic-single-part-sized.dis create mode 100644 tests/part/mbr/basic-single-part-unsized.dis create mode 100644 tests/part/mbr/no-part-bootloader.dis diff --git a/justfile b/justfile index 55d2044..d8fb79a 100644 --- a/justfile +++ b/justfile @@ -17,7 +17,10 @@ behaviour-tests: \ (behaviour-test "tests/basic/fill-0xAA.dis") \ (behaviour-test "tests/basic/fill-0xFF.dis") \ (behaviour-test "tests/basic/raw.dis") \ - (behaviour-test "tests/part/mbr/minimal.dis") + (behaviour-test "tests/part/mbr/minimal.dis") \ + (behaviour-test "tests/part/mbr/no-part-bootloader.dis") \ + (behaviour-test "tests/part/mbr/basic-single-part-sized.dis") \ + (behaviour-test "tests/part/mbr/basic-single-part-unsized.dis") behaviour-test script: install ./zig-out/bin/dim --output .zig-cache/disk.img --script "{{script}}" diff --git a/src/components/part/MbrPartitionTable.zig b/src/components/part/MbrPartitionTable.zig index 958afc7..dc693ae 100644 --- a/src/components/part/MbrPartitionTable.zig +++ b/src/components/part/MbrPartitionTable.zig @@ -25,6 +25,7 @@ pub fn parse(ctx: dim.Context) !dim.Content { }; var next_part_id: usize = 0; + var last_part_id: ?usize = null; while (next_part_id < pf.partitions.len) { const kw = try ctx.parse_enum(enum { bootloader, @@ -45,11 +46,20 @@ pub fn parse(ctx: dim.Context) !dim.Content { }, .part => { pf.partitions[next_part_id] = try parse_partition(ctx); + last_part_id = next_part_id; next_part_id += 1; }, } } + if (last_part_id) |part_id| { + for (0..part_id -| 1) |prev| { + if (pf.partitions[prev].?.size == null) { + try ctx.report_nonfatal_error("MBR partition {} does not have a size, but is not last.", .{prev}); + } + } + } + return .create_handle(pf, .create(PartTable, .{ .guess_size_fn = guess_size, .render_fn = render, @@ -62,7 +72,7 @@ fn parse_partition(ctx: dim.Context) !Partition { .size = null, .bootable = false, .type = .empty, - .data = .empty, + .contains = .empty, }; var updater: dim.FieldUpdater(Partition, &.{ @@ -77,7 +87,7 @@ fn parse_partition(ctx: dim.Context) !Partition { bootable, size, offset, - contents, + contains, endpart, }); try switch (kw) { @@ -85,7 +95,7 @@ fn parse_partition(ctx: dim.Context) !Partition { .bootable => updater.set(.bootable, true), .size => updater.set(.size, try ctx.parse_mem_size()), .offset => updater.set(.offset, try ctx.parse_mem_size()), - .contents => updater.set(.data, try ctx.parse_content()), + .contains => updater.set(.contains, try ctx.parse_content()), .endpart => break :parse_loop, }; } @@ -96,7 +106,31 @@ fn parse_partition(ctx: dim.Context) !Partition { } fn guess_size(self: *PartTable) dim.Content.GuessError!dim.SizeGuess { - _ = self; + var upper_bound: u64 = 512; + var all_parts_bounded = true; + + for (self.partitions) |mpart| { + const part = mpart orelse continue; + + if (part.offset != null and part.size != null) { + upper_bound = @max(upper_bound, part.offset.? + part.size.?); + } else { + all_parts_bounded = false; + } + } + if (all_parts_bounded) + return .{ .exact = upper_bound }; + + for (self.partitions) |mpart| { + const part = mpart orelse continue; + + if (part.offset != null and part.size != null) { + upper_bound = @max(upper_bound, part.offset.? 
+ part.size.?); + } else { + all_parts_bounded = false; + } + } + @panic("not implemented yet!"); } @@ -211,7 +245,7 @@ pub const Partition = struct { .size = 0, .bootable = false, .type = .empty, - .data = undefined, + .contains = .empty, }; offset: ?u64 = null, @@ -220,7 +254,7 @@ pub const Partition = struct { bootable: bool, type: PartitionType, - data: dim.Content, + contains: dim.Content, }; /// https://en.wikipedia.org/wiki/Partition_type diff --git a/src/dim.zig b/src/dim.zig index aa2cef6..9917ac9 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -122,6 +122,9 @@ pub fn main() !u8 { return 1; } + const root_size_estimation = try root_content.guess_required_size(); + std.log.info("root size: {}", .{root_size_estimation}); + { var output_file = try current_dir.atomicFile(output_path, .{}); defer output_file.deinit(); @@ -542,7 +545,6 @@ pub const SizeGuess = union(enum) { unknown, exact: u64, at_least: u64, - at_most: u64, }; pub const BinaryStream = struct { diff --git a/tests/part/mbr/basic-single-part-sized.dis b/tests/part/mbr/basic-single-part-sized.dis new file mode 100644 index 0000000..e4bcc74 --- /dev/null +++ b/tests/part/mbr/basic-single-part-sized.dis @@ -0,0 +1,9 @@ +mbr-part + part + type empty + contains fill 0xAA + size 10M + endpart + ignore # partition 2 + ignore # partition 3 + ignore # partition 4 diff --git a/tests/part/mbr/basic-single-part-unsized.dis b/tests/part/mbr/basic-single-part-unsized.dis new file mode 100644 index 0000000..09b15d8 --- /dev/null +++ b/tests/part/mbr/basic-single-part-unsized.dis @@ -0,0 +1,8 @@ +mbr-part + part + type empty + contains fill 0xAA + endpart + ignore # partition 2 + ignore # partition 3 + ignore # partition 4 diff --git a/tests/part/mbr/no-part-bootloader.dis b/tests/part/mbr/no-part-bootloader.dis new file mode 100644 index 0000000..095ff90 --- /dev/null +++ b/tests/part/mbr/no-part-bootloader.dis @@ -0,0 +1,6 @@ +mbr-part + bootloader paste-file ./minimal.dis + ignore # partition 1 + ignore # partition 2 + ignore # partition 3 + ignore # partition 4 From 0db84d3c3d8c39d3ce9c4c081e0f29d2e510a178 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Tue, 11 Mar 2025 11:02:23 +0100 Subject: [PATCH 12/26] Makes --size unconditional, removes guessing disk size --- justfile | 1 - src/components/EmptyData.zig | 10 +--- src/components/FillData.zig | 5 -- src/components/PasteFile.zig | 7 --- src/components/part/MbrPartitionTable.zig | 30 ------------ src/dim.zig | 59 +++++------------------ 6 files changed, 13 insertions(+), 99 deletions(-) diff --git a/justfile b/justfile index d8fb79a..c2ee041 100644 --- a/justfile +++ b/justfile @@ -23,7 +23,6 @@ behaviour-tests: \ (behaviour-test "tests/part/mbr/basic-single-part-unsized.dis") behaviour-test script: install - ./zig-out/bin/dim --output .zig-cache/disk.img --script "{{script}}" ./zig-out/bin/dim --output .zig-cache/disk.img --script "{{script}}" --size 30M diff --git a/src/components/EmptyData.zig b/src/components/EmptyData.zig index 1705f0f..da335f0 100644 --- a/src/components/EmptyData.zig +++ b/src/components/EmptyData.zig @@ -9,18 +9,12 @@ const dim = @import("../dim.zig"); const EmptyData = @This(); pub fn parse(ctx: dim.Context) !dim.Content { - const pf = try ctx.alloc_object(EmptyData); - pf.* = .{}; - return .create_handle(pf, .create(@This(), .{ - .guess_size_fn = guess_size, + _ = ctx; + return .create_handle(undefined, .create(@This(), .{ .render_fn = render, })); } -fn guess_size(_: *EmptyData) 
dim.Content.GuessError!dim.SizeGuess { - return .{ .at_least = 0 }; -} - fn render(self: *EmptyData, stream: *dim.BinaryStream) dim.Content.RenderError!void { _ = self; _ = stream; diff --git a/src/components/FillData.zig b/src/components/FillData.zig index 37df9cc..6bd1e2a 100644 --- a/src/components/FillData.zig +++ b/src/components/FillData.zig @@ -15,15 +15,10 @@ pub fn parse(ctx: dim.Context) !dim.Content { .fill_value = try ctx.parse_integer(u8, 0), }; return .create_handle(pf, .create(@This(), .{ - .guess_size_fn = guess_size, .render_fn = render, })); } -fn guess_size(_: *FillData) dim.Content.GuessError!dim.SizeGuess { - return .{ .at_least = 0 }; -} - fn render(self: *FillData, stream: *dim.BinaryStream) dim.Content.RenderError!void { try stream.writer().writeByteNTimes( self.fill_value, diff --git a/src/components/PasteFile.zig b/src/components/PasteFile.zig index 01eccf4..3cbd9e7 100644 --- a/src/components/PasteFile.zig +++ b/src/components/PasteFile.zig @@ -11,17 +11,10 @@ pub fn parse(ctx: dim.Context) !dim.Content { .file_handle = try ctx.parse_file_name(), }; return .create_handle(pf, .create(@This(), .{ - .guess_size_fn = guess_size, .render_fn = render, })); } -fn guess_size(self: *PasteFile) dim.Content.GuessError!dim.SizeGuess { - const size = try self.file_handle.get_size(); - - return .{ .exact = size }; -} - fn render(self: *PasteFile, stream: *dim.BinaryStream) dim.Content.RenderError!void { try self.file_handle.copy_to(stream); } diff --git a/src/components/part/MbrPartitionTable.zig b/src/components/part/MbrPartitionTable.zig index dc693ae..0db62ca 100644 --- a/src/components/part/MbrPartitionTable.zig +++ b/src/components/part/MbrPartitionTable.zig @@ -61,7 +61,6 @@ pub fn parse(ctx: dim.Context) !dim.Content { } return .create_handle(pf, .create(PartTable, .{ - .guess_size_fn = guess_size, .render_fn = render, })); } @@ -105,35 +104,6 @@ fn parse_partition(ctx: dim.Context) !Partition { return part; } -fn guess_size(self: *PartTable) dim.Content.GuessError!dim.SizeGuess { - var upper_bound: u64 = 512; - var all_parts_bounded = true; - - for (self.partitions) |mpart| { - const part = mpart orelse continue; - - if (part.offset != null and part.size != null) { - upper_bound = @max(upper_bound, part.offset.? + part.size.?); - } else { - all_parts_bounded = false; - } - } - if (all_parts_bounded) - return .{ .exact = upper_bound }; - - for (self.partitions) |mpart| { - const part = mpart orelse continue; - - if (part.offset != null and part.size != null) { - upper_bound = @max(upper_bound, part.offset.? 
+ part.size.?); - } else { - all_parts_bounded = false; - } - } - - @panic("not implemented yet!"); -} - fn render(self: *PartTable, stream: *dim.BinaryStream) dim.Content.RenderError!void { _ = self; _ = stream; diff --git a/src/dim.zig b/src/dim.zig index 9917ac9..c445dd1 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -11,7 +11,7 @@ const max_script_size = 10 * DiskSize.MiB; const Options = struct { output: ?[]const u8 = null, - size: ?DiskSize = null, + size: DiskSize = DiskSize.empty, script: ?[]const u8 = null, @"import-env": bool = false, }; @@ -79,6 +79,11 @@ pub fn main() !u8 { if (bad_args) return 1; + const size_limit: u64 = options.size.size_in_bytes(); + if (size_limit == 0) { + return fatal("--size must be given!"); + } + var current_dir = try std.fs.cwd().openDir(".", .{}); defer current_dir.close(); @@ -122,21 +127,14 @@ pub fn main() !u8 { return 1; } - const root_size_estimation = try root_content.guess_required_size(); - std.log.info("root size: {}", .{root_size_estimation}); - { var output_file = try current_dir.atomicFile(output_path, .{}); defer output_file.deinit(); - const size_limit: ?u64 = if (options.size) |disk_size| blk: { - try output_file.file.setEndPos(disk_size.size_in_bytes()); - - break :blk disk_size.size_in_bytes(); - } else null; + try output_file.file.setEndPos(size_limit); var stream = BinaryStream{ - .capacity = size_limit orelse 0, + .capacity = size_limit, }; try root_content.render(&stream); @@ -346,23 +344,7 @@ pub const Content = struct { obj: *anyopaque, vtable: *const VTable, - pub const empty: Content = .{ - .obj = undefined, - .vtable = &emptyVTable, - }; - - const emptyVTable: VTable = blk: { - const Wrap = struct { - fn render(_: *anyopaque, _: *BinaryStream) RenderError!void {} - fn guess_size_fn(_: *anyopaque) GuessError!SizeGuess { - return .{ .exact = 0 }; - } - }; - break :blk .{ - .render_fn = Wrap.render, - .guess_size_fn = Wrap.guess_size_fn, - }; - }; + pub const empty: Content = @import("components/EmptyData.zig").parse(undefined) catch unreachable; pub fn create_handle(obj: *anyopaque, vtable: *const VTable) Content { return .{ .obj = obj, .vtable = vtable }; @@ -373,22 +355,13 @@ pub const Content = struct { try content.vtable.render_fn(content.obj, stream); } - /// Attempts to determine the required size of the content. 
- /// - /// This may not be an exact guess, so the result can have - pub fn guess_required_size(content: Content) GuessError!SizeGuess { - return try content.vtable.guess_size_fn(content.obj); - } - pub const VTable = struct { render_fn: *const fn (*anyopaque, *BinaryStream) RenderError!void, - guess_size_fn: *const fn (*anyopaque) GuessError!SizeGuess, pub fn create( comptime Container: type, comptime funcs: struct { render_fn: *const fn (*Container, *BinaryStream) RenderError!void, - guess_size_fn: *const fn (*Container) GuessError!SizeGuess, }, ) *const VTable { const Wrap = struct { @@ -398,15 +371,9 @@ pub const Content = struct { stream, ); } - fn guess_size(self: *anyopaque) GuessError!SizeGuess { - return funcs.guess_size_fn( - @ptrCast(@alignCast(self)), - ); - } }; return comptime &.{ .render_fn = Wrap.render, - .guess_size_fn = Wrap.guess_size, }; } }; @@ -541,12 +508,6 @@ pub const FileHandle = struct { } }; -pub const SizeGuess = union(enum) { - unknown, - exact: u64, - at_least: u64, -}; - pub const BinaryStream = struct { pub const WriteError = error{IoError}; pub const Writer = std.io.Writer(*BinaryStream, WriteError, write_some); @@ -580,6 +541,8 @@ const DiskSize = enum(u64) { const MiB = 1024 * 1024; const GiB = 1024 * 1024 * 1024; + pub const empty: DiskSize = @enumFromInt(0); + _, pub fn parse(str: []const u8) error{ InvalidSize, Overflow }!DiskSize { From 1f99f5eb6620d5a6944d073620ab4101df0b52c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Tue, 11 Mar 2025 12:30:02 +0100 Subject: [PATCH 13/26] Implements basic writing to files --- .gitignore | 3 +- justfile | 6 +- src/components/FillData.zig | 2 +- src/dim.zig | 113 ++++++++++++++++++++++++++++++++---- 4 files changed, 108 insertions(+), 16 deletions(-) diff --git a/.gitignore b/.gitignore index d909469..dfe5b37 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ .zig-cache/ zig-out/ -.vscode/ \ No newline at end of file +.vscode/ +.dim-out/ \ No newline at end of file diff --git a/justfile b/justfile index c2ee041..373f1d9 100644 --- a/justfile +++ b/justfile @@ -1,6 +1,8 @@ zig:="zig-0.14.0" +out:=".dim-out" + default: install test install: @@ -23,8 +25,8 @@ behaviour-tests: \ (behaviour-test "tests/part/mbr/basic-single-part-unsized.dis") behaviour-test script: install - ./zig-out/bin/dim --output .zig-cache/disk.img --script "{{script}}" --size 30M - + @mkdir -p {{ join(out, parent_directory(script)) }} + ./zig-out/bin/dim --output {{ join(out, without_extension(script) + ".img") }} --script "{{script}}" --size 30M fuzz: {{zig}} build install test --fuzz --port 35991 diff --git a/src/components/FillData.zig b/src/components/FillData.zig index 6bd1e2a..82b8cb4 100644 --- a/src/components/FillData.zig +++ b/src/components/FillData.zig @@ -22,6 +22,6 @@ pub fn parse(ctx: dim.Context) !dim.Content { fn render(self: *FillData, stream: *dim.BinaryStream) dim.Content.RenderError!void { try stream.writer().writeByteNTimes( self.fill_value, - stream.capacity, + stream.length, ); } diff --git a/src/dim.zig b/src/dim.zig index c445dd1..c99c1f2 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -133,9 +133,7 @@ pub fn main() !u8 { try output_file.file.setEndPos(size_limit); - var stream = BinaryStream{ - .capacity = size_limit, - }; + var stream: BinaryStream = .init_file(output_file.file, size_limit); try root_content.render(&stream); @@ -338,7 +336,7 @@ const Environment = struct { /// /// pub const Content = struct { - pub const RenderError = FileName.OpenError 
|| FileHandle.ReadError || error{WriteError}; + pub const RenderError = FileName.OpenError || FileHandle.ReadError || BinaryStream.WriteError; pub const GuessError = FileName.GetSizeError; obj: *anyopaque, @@ -509,26 +507,117 @@ pub const FileHandle = struct { }; pub const BinaryStream = struct { - pub const WriteError = error{IoError}; + pub const WriteError = error{ Overflow, IoError }; pub const Writer = std.io.Writer(*BinaryStream, WriteError, write_some); + backing: Backing, + + virtual_offset: u64 = 0, + /// Max number of bytes that can be written - capacity: u64, + length: u64, + + /// Constructs a BinaryStream from a slice. + pub fn init_buffer(data: []u8) BinaryStream { + return .{ + .backing = .{ .buffer = data.ptr }, + .length = data.len, + }; + } + + /// Constructs a BinaryStream from a file. + pub fn init_file(file: std.fs.File, max_len: u64) BinaryStream { + return .{ + .backing = .{ + .file = .{ + .file = file, + .base = 0, + }, + }, + .length = max_len, + }; + } + + /// Returns a view into the stream. + pub fn slice(bs: BinaryStream, offset: u64, length: ?u64) error{OutOfBounds}!BinaryStream { + if (offset > bs.length) + return error.OutOfBounds; + const true_length = length or bs.length - offset; + if (true_length > bs.length) + return error.OutOfBounds; + + return .{ + .length = true_length, + .backing = switch (bs.backing) { + .buffer => |old| .{ .buffer = old + offset }, + .file => |old| .{ + .file = old.file, + .base = old.base + offset, + }, + }, + }; + } + + pub fn write(bs: *BinaryStream, offset: u64, data: []const u8) WriteError!void { + const end_pos = offset + data.len; + if (end_pos > bs.length) + return error.Overflow; + + switch (bs.backing) { + .buffer => |ptr| @memcpy(ptr[@intCast(offset)..][0..data.len], data), + .file => |state| { + state.file.seekTo(state.base + offset) catch return error.IoError; + state.file.writeAll(data) catch |err| switch (err) { + error.DiskQuota, error.NoSpaceLeft, error.FileTooBig => return error.Overflow, + + error.InputOutput, + error.DeviceBusy, + error.InvalidArgument, + error.AccessDenied, + error.BrokenPipe, + error.SystemResources, + error.OperationAborted, + error.NotOpenForWriting, + error.LockViolation, + error.WouldBlock, + error.ConnectionResetByPeer, + error.ProcessNotFound, + error.NoDevice, + error.Unexpected, + => return error.IoError, + }; + }, + } + } + + pub fn seek_to(bs: *BinaryStream, offset: u64) error{OutOfBounds}!void { + if (offset > bs.length) + return error.OutOfBounds; + bs.virtual_offset = offset; + } pub fn writer(bs: *BinaryStream) Writer { return .{ .context = bs }; } fn write_some(stream: *BinaryStream, data: []const u8) WriteError!usize { - _ = stream; - // TODO: Implement write_some! 
+ const remaining_len = stream.length - stream.virtual_offset; + + const written_len: usize = @intCast(@min(remaining_len, data.len)); - // std.debug.print("dummy write of '{}'\n", .{ - // std.fmt.fmtSliceHexUpper(data), - // }); + try stream.write(stream.virtual_offset, data[0..written_len]); + stream.virtual_offset += written_len; - return data.len; + return written_len; } + + pub const Backing = union(enum) { + file: struct { + file: std.fs.File, + base: u64, + }, + buffer: [*]u8, + }; }; test { From 11720b0b4e4c65f11469c8bca7b63c05bc49c5d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Tue, 11 Mar 2025 13:02:47 +0100 Subject: [PATCH 14/26] Implements MBR partition writing --- justfile | 2 + src/components/part/MbrPartitionTable.zig | 243 ++++++++++++---------- src/dim.zig | 13 +- 3 files changed, 142 insertions(+), 116 deletions(-) diff --git a/justfile b/justfile index 373f1d9..a033bc2 100644 --- a/justfile +++ b/justfile @@ -28,5 +28,7 @@ behaviour-test script: install @mkdir -p {{ join(out, parent_directory(script)) }} ./zig-out/bin/dim --output {{ join(out, without_extension(script) + ".img") }} --script "{{script}}" --size 30M +# TODO(fqu): sfdisk --json .dim-out/tests/part/mbr/basic-single-part-unsized.img + fuzz: {{zig}} build install test --fuzz --port 35991 diff --git a/src/components/part/MbrPartitionTable.zig b/src/components/part/MbrPartitionTable.zig index 0db62ca..568afa4 100644 --- a/src/components/part/MbrPartitionTable.zig +++ b/src/components/part/MbrPartitionTable.zig @@ -7,6 +7,8 @@ const dim = @import("../../dim.zig"); const PartTable = @This(); +const block_size = 512; + bootloader: ?dim.Content, disk_id: ?u32, partitions: [4]?Partition, @@ -41,7 +43,7 @@ pub fn parse(ctx: dim.Context) !dim.Content { pf.bootloader = bootloader_content; }, .ignore => { - pf.partitions[next_part_id] = .unused; + pf.partitions[next_part_id] = null; next_part_id += 1; }, .part => { @@ -58,6 +60,22 @@ pub fn parse(ctx: dim.Context) !dim.Content { try ctx.report_nonfatal_error("MBR partition {} does not have a size, but is not last.", .{prev}); } } + + var all_auto = true; + var all_manual = true; + for (pf.partitions) |part_or_null| { + const part = part_or_null orelse continue; + + if (part.offset != null) { + all_auto = false; + } else { + all_manual = false; + } + } + + if (!all_auto and !all_manual) { + try ctx.report_nonfatal_error("not all partitions have an explicit offset!", .{}); + } } return .create_handle(pf, .create(PartTable, .{ @@ -104,120 +122,121 @@ fn parse_partition(ctx: dim.Context) !Partition { return part; } -fn render(self: *PartTable, stream: *dim.BinaryStream) dim.Content.RenderError!void { - _ = self; - _ = stream; -} - -// .mbr => |table| { // MbrTable -// { -// var boot_sector: [512]u8 = .{0} ** 512; - -// @memcpy(boot_sector[0..table.bootloader.len], &table.bootloader); - -// std.mem.writeInt(u32, boot_sector[0x1B8..0x1BC], if (table.disk_id) |disk_id| disk_id else 0x0000_0000, .little); -// std.mem.writeInt(u16, boot_sector[0x1BC..0x1BE], 0x0000, .little); - -// var all_auto = true; -// var all_manual = true; -// for (table.partitions) |part_or_null| { -// const part = part_or_null orelse continue; - -// if (part.offset != null) { -// all_auto = false; -// } else { -// all_manual = false; -// } -// } - -// if (!all_auto and !all_manual) { -// std.log.err("{s}: not all partitions have an explicit offset!", .{context.slice()}); -// return error.InvalidSectorBoundary; -// } - -// const part_base = 
0x01BE; -// var auto_offset: u64 = 2048; -// for (table.partitions, 0..) |part_or_null, part_id| { -// const reset_len = context.len; -// defer context.len = reset_len; - -// var buffer: [64]u8 = undefined; -// context.appendSliceAssumeCapacity(std.fmt.bufPrint(&buffer, "[{}]", .{part_id}) catch unreachable); - -// const desc = boot_sector[part_base + 16 * part_id ..][0..16]; - -// if (part_or_null) |part| { -// // https://wiki.osdev.org/MBR#Partition_table_entry_format - -// const part_offset = part.offset orelse auto_offset; - -// if ((part_offset % 512) != 0) { -// std.log.err("{s}: .offset is not divisible by 512!", .{context.slice()}); -// return error.InvalidSectorBoundary; -// } -// if ((part.size % 512) != 0) { -// std.log.err("{s}: .size is not divisible by 512!", .{context.slice()}); -// return error.InvalidSectorBoundary; -// } - -// const lba_u64 = @divExact(part_offset, 512); -// const size_u64 = @divExact(part.size, 512); - -// const lba = std.math.cast(u32, lba_u64) orelse { -// std.log.err("{s}: .offset is out of bounds!", .{context.slice()}); -// return error.InvalidSectorBoundary; -// }; -// const size = std.math.cast(u32, size_u64) orelse { -// std.log.err("{s}: .size is out of bounds!", .{context.slice()}); -// return error.InvalidSectorBoundary; -// }; - -// desc[0] = if (part.bootable) 0x80 else 0x00; - -// desc[1..4].* = mbr.encodeMbrChsEntry(lba); // chs_start -// desc[4] = @intFromEnum(part.type); -// desc[5..8].* = mbr.encodeMbrChsEntry(lba + size - 1); // chs_end -// std.mem.writeInt(u32, desc[8..12], lba, .little); // lba_start -// std.mem.writeInt(u32, desc[12..16], size, .little); // block_count - -// auto_offset += part.size; -// } else { -// @memset(desc, 0); // inactive -// } -// } -// boot_sector[0x01FE] = 0x55; -// boot_sector[0x01FF] = 0xAA; - -// try disk.handle.writeAll(&boot_sector); -// } - -// { -// var auto_offset: u64 = 2048; -// for (table.partitions, 0..) |part_or_null, part_id| { -// const part = part_or_null orelse continue; - -// const reset_len = context.len; -// defer context.len = reset_len; - -// var buffer: [64]u8 = undefined; -// context.appendSliceAssumeCapacity(std.fmt.bufPrint(&buffer, "[{}]", .{part_id}) catch unreachable); - -// try writeDiskImage(b, asking, disk, base + auto_offset, part.size, part.data, context); - -// auto_offset += part.size; -// } -// } -// }, +fn render(table: *PartTable, stream: *dim.BinaryStream) dim.Content.RenderError!void { + const last_part_id = blk: { + var last: usize = 0; + for (table.partitions, 0..) |p, i| { + if (p != null) + last = i; + } + break :blk last; + }; -pub const Partition = struct { - pub const unused: Partition = .{ - .offset = null, - .size = 0, - .bootable = false, - .type = .empty, - .contains = .empty, + const PartInfo = struct { + offset: u64, + size: u64, }; + var part_infos: [4]?PartInfo = @splat(null); + + // Compute and write boot sector, based on the follow: + // - https://en.wikipedia.org/wiki/Master_boot_record#Sector_layout + { + var boot_sector: [block_size]u8 = @splat(0); + + if (table.bootloader) |bootloader| { + var sector: dim.BinaryStream = .init_buffer(&boot_sector); + + try bootloader.render(§or); + + const upper_limit: u64 = if (table.disk_id != null) + 0x01B8 + else + 0x1BE; + + if (sector.virtual_offset >= upper_limit) { + // TODO(fqu): Emit warning diagnostics here that parts of the bootloader will be overwritten by the MBR data. 
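+                // Until a diagnostics context is available inside render(), a plain
+                // std.log message is one possible stand-in (sketch only, names taken
+                // from the surrounding code):
+                //
+                //   std.log.warn("bootloader occupies {d} bytes; bytes at offset 0x{X} and above will be overwritten by MBR data", .{ sector.virtual_offset, upper_limit });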
+ } + } + + if (table.disk_id) |disk_id| { + std.mem.writeInt(u32, boot_sector[0x1B8..0x1BC], disk_id, .little); + } + // TODO(fqu): Implement "0x5A5A if copy-protected" + std.mem.writeInt(u16, boot_sector[0x1BC..0x1BE], 0x0000, .little); + + const part_base = 0x01BE; + var auto_offset: u64 = 2048 * block_size; // TODO(fqu): Make this configurable by allowing `offset` on the first partition, but still allow auto-layouting + for (table.partitions, &part_infos, 0..) |part_or_null, *pinfo, part_id| { + const desc: *[16]u8 = boot_sector[part_base + 16 * part_id ..][0..16]; + + // Initialize to "inactive" state + desc.* = @splat(0); + pinfo.* = null; + + if (part_or_null) |part| { + // https://wiki.osdev.org/MBR#Partition_table_entry_format + + const part_offset = part.offset orelse auto_offset; + const part_size = part.size orelse if (part_id == last_part_id) + std.mem.alignBackward(u64, stream.length - part_offset, block_size) + else + return error.ConfigurationError; + + pinfo.* = .{ + .offset = part_offset, + .size = part_size, + }; + + if ((part_offset % block_size) != 0) { + std.log.err("partition offset is not divisible by {}!", .{block_size}); + return error.ConfigurationError; + } + if ((part_size % block_size) != 0) { + std.log.err("partition size is not divisible by {}!", .{block_size}); + return error.ConfigurationError; + } + + const lba_u64 = @divExact(part_offset, block_size); + const size_u64 = @divExact(part_size, block_size); + + const lba = std.math.cast(u32, lba_u64) orelse { + std.log.err("partition offset is out of bounds!", .{}); + return error.ConfigurationError; + }; + const size = std.math.cast(u32, size_u64) orelse { + std.log.err("partition size is out of bounds!", .{}); + return error.ConfigurationError; + }; + + desc[0] = if (part.bootable) 0x80 else 0x00; + + desc[1..4].* = encodeMbrChsEntry(lba); // chs_start + desc[4] = @intFromEnum(part.type); + desc[5..8].* = encodeMbrChsEntry(lba + size - 1); // chs_end + std.mem.writeInt(u32, desc[8..12], lba, .little); // lba_start + std.mem.writeInt(u32, desc[12..16], size, .little); // block_count + + auto_offset += part_size; + } + } + boot_sector[0x01FE] = 0x55; + boot_sector[0x01FF] = 0xAA; + + try stream.write(0, &boot_sector); + } + + for (part_infos, table.partitions) |maybe_info, maybe_part| { + const part = maybe_part orelse continue; + const info = maybe_info orelse unreachable; + + var sub_view = try stream.slice(info.offset, info.size); + + try part.contains.render(&sub_view); + } +} + +pub const Partition = struct { offset: ?u64 = null, size: ?u64, diff --git a/src/dim.zig b/src/dim.zig index c99c1f2..546c9fd 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -336,7 +336,10 @@ const Environment = struct { /// /// pub const Content = struct { - pub const RenderError = FileName.OpenError || FileHandle.ReadError || BinaryStream.WriteError; + pub const RenderError = FileName.OpenError || FileHandle.ReadError || BinaryStream.WriteError || error{ + ConfigurationError, + OutOfBounds, + }; pub const GuessError = FileName.GetSizeError; obj: *anyopaque, @@ -542,7 +545,7 @@ pub const BinaryStream = struct { pub fn slice(bs: BinaryStream, offset: u64, length: ?u64) error{OutOfBounds}!BinaryStream { if (offset > bs.length) return error.OutOfBounds; - const true_length = length or bs.length - offset; + const true_length = length orelse bs.length - offset; if (true_length > bs.length) return error.OutOfBounds; @@ -551,8 +554,10 @@ pub const BinaryStream = struct { .backing = switch (bs.backing) { .buffer => |old| .{ .buffer = old 
+ offset }, .file => |old| .{ - .file = old.file, - .base = old.base + offset, + .file = .{ + .file = old.file, + .base = old.base + offset, + }, }, }, }; From 1240f85345a8d5b8b87a4f22eb486f1d8825ff95 Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki <der.teufel.mail@gmail.com> Date: Tue, 11 Mar 2025 13:56:46 +0100 Subject: [PATCH 15/26] Update help message and behaviour tests --- build.zig | 21 +++++++-------------- src/dim.zig | 4 ++-- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/build.zig b/build.zig index 9f078e2..d2b1746 100644 --- a/build.zig +++ b/build.zig @@ -58,20 +58,13 @@ pub fn build(b: *std.Build) void { std.mem.replaceScalar(u8, step_name, '/', '-'); const script_test = b.step(step_name, b.fmt("Run {s} behaviour test", .{script})); - const run_sizeless = b.addRunArtifact(dim_exe); - run_sizeless.addArg("--output"); - _ = run_sizeless.addOutputFileArg("disk.img"); - run_sizeless.addArg("--script"); - run_sizeless.addFileArg(b.path(script)); - script_test.dependOn(&run_sizeless.step); - - const run_with_size = b.addRunArtifact(dim_exe); - run_with_size.addArg("--output"); - _ = run_with_size.addOutputFileArg("disk.img"); - run_with_size.addArg("--script"); - run_with_size.addFileArg(b.path(script)); - run_with_size.addArgs(&.{ "--size", "30M" }); - script_test.dependOn(&run_with_size.step); + const run_behaviour = b.addRunArtifact(dim_exe); + run_behaviour.addArg("--output"); + _ = run_behaviour.addOutputFileArg("disk.img"); + run_behaviour.addArg("--script"); + run_behaviour.addFileArg(b.path(script)); + run_behaviour.addArgs(&.{ "--size", "30M" }); + script_test.dependOn(&run_behaviour.step); behaviour_tests_step.dependOn(script_test); } diff --git a/src/dim.zig b/src/dim.zig index 546c9fd..462adb2 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -22,8 +22,8 @@ const usage = \\OPTIONS: \\ --output <path> \\ mandatory: where to store the output file - \\[--size <size>] - \\ optional: how big is the resulting disk image? allowed suffixes: k,K,M,G + \\ --size <size> + \\ mandatory: how big is the resulting disk image? allowed suffixes: k,K,M,G \\ --script <path> \\ mandatory: which script file to execute? \\[--import-env] From a6ad692633739d68627a768c9582424e64b93038 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Thu, 13 Mar 2025 13:43:52 +0100 Subject: [PATCH 16/26] Implements parsing of VFAT file systems --- concept/script.dis | 6 +- data/rootfs.dis | 10 ++ {dummy => data/rootfs}/README.md | 0 {dummy => data/rootfs}/Windows/explorer.exe | 0 .../rootfs}/Windows/system32/calc.exe | 0 justfile | 2 +- src/Parser.zig | 38 +++---- src/components/fs/FatFileSystem.zig | 61 ++++++++++- src/components/fs/common.zig | 100 ++++++++++++++++++ src/dim.zig | 12 ++- tests/fs/fat32.dis | 4 + 11 files changed, 202 insertions(+), 31 deletions(-) create mode 100644 data/rootfs.dis rename {dummy => data/rootfs}/README.md (100%) rename {dummy => data/rootfs}/Windows/explorer.exe (100%) rename {dummy => data/rootfs}/Windows/system32/calc.exe (100%) create mode 100644 src/components/fs/common.zig create mode 100644 tests/fs/fat32.dis diff --git a/concept/script.dis b/concept/script.dis index b7ac790..997469e 100644 --- a/concept/script.dis +++ b/concept/script.dis @@ -5,10 +5,10 @@ mbr-part size 500M bootable contents - fat fat32 + vfat fat32 label AshetOS - add-dir ../../rootfs . - add-dir $PATH2 . + copy-dir ../../rootfs . + copy-dir $PATH2 . 
copy-file $PATH3 apps/hello-world.ashex copy-file $PATH3 apps/hello-gui.ashex copy-file $PATH4 apps/clock.ashex diff --git a/data/rootfs.dis b/data/rootfs.dis new file mode 100644 index 0000000..3a06dbf --- /dev/null +++ b/data/rootfs.dis @@ -0,0 +1,10 @@ +mkdir /boot/EFI/refind/icons +mkdir /boot/EFI/nixos/.extra-files/ +mkdir /Users/xq/ + +# copy-XXX uses <dst> <src> syntax as it's consistent with other paths +copy-dir /Windows ./dummy/Windows +copy-file /Users/xq/README.md ./dummy/README.md + +# create-file <path> <size> <contents> creates nested data +create-file /Users/xq/blob.data 512k fill 0x70 diff --git a/dummy/README.md b/data/rootfs/README.md similarity index 100% rename from dummy/README.md rename to data/rootfs/README.md diff --git a/dummy/Windows/explorer.exe b/data/rootfs/Windows/explorer.exe similarity index 100% rename from dummy/Windows/explorer.exe rename to data/rootfs/Windows/explorer.exe diff --git a/dummy/Windows/system32/calc.exe b/data/rootfs/Windows/system32/calc.exe similarity index 100% rename from dummy/Windows/system32/calc.exe rename to data/rootfs/Windows/system32/calc.exe diff --git a/justfile b/justfile index a033bc2..5a12bcc 100644 --- a/justfile +++ b/justfile @@ -22,7 +22,7 @@ behaviour-tests: \ (behaviour-test "tests/part/mbr/minimal.dis") \ (behaviour-test "tests/part/mbr/no-part-bootloader.dis") \ (behaviour-test "tests/part/mbr/basic-single-part-sized.dis") \ - (behaviour-test "tests/part/mbr/basic-single-part-unsized.dis") + (behaviour-test "tests/fs/fat32.dis") behaviour-test script: install @mkdir -p {{ join(out, parent_directory(script)) }} diff --git a/src/Parser.zig b/src/Parser.zig index 75c841d..d4c35bc 100644 --- a/src/Parser.zig +++ b/src/Parser.zig @@ -34,10 +34,10 @@ pub const IO = struct { const File = struct { path: []const u8, tokenizer: Tokenizer, - free: bool, }; allocator: std.mem.Allocator, +arena: std.heap.ArenaAllocator, io: *const IO, file_stack: []File, @@ -50,6 +50,7 @@ pub fn init(allocator: std.mem.Allocator, io: *const IO, options: InitOptions) e var slice = try allocator.alloc(File, options.max_include_depth); slice.len = 0; return .{ + .arena = std.heap.ArenaAllocator.init(allocator), .allocator = allocator, .io = io, .max_include_depth = options.max_include_depth, @@ -58,14 +59,9 @@ pub fn init(allocator: std.mem.Allocator, io: *const IO, options: InitOptions) e } pub fn deinit(parser: *Parser) void { - for (parser.file_stack) |file| { - if (file.free) { - parser.allocator.free(file.path); - parser.allocator.free(file.tokenizer.source); - } - } parser.file_stack.len = parser.max_include_depth; parser.allocator.free(parser.file_stack); + parser.arena.deinit(); parser.* = undefined; } @@ -83,15 +79,13 @@ pub fn push_source(parser: *Parser, options: struct { parser.file_stack[index] = .{ .path = options.path, .tokenizer = .init(options.contents), - .free = false, }; } pub fn push_file(parser: *Parser, include_path: []const u8) !void { - const abs_include_path = try parser.get_include_path(parser.allocator, include_path); + const abs_include_path = try parser.get_include_path(parser.arena.allocator(), include_path); - const file_contents = try parser.io.fetch_file(parser.allocator, abs_include_path); - errdefer parser.allocator.free(file_contents); + const file_contents = try parser.io.fetch_file(parser.arena.allocator(), abs_include_path); const index = parser.file_stack.len; parser.file_stack.len += 1; @@ -99,7 +93,6 @@ pub fn push_file(parser: *Parser, include_path: []const u8) !void { parser.file_stack[index] = .{ .path 
= abs_include_path, .tokenizer = .init(file_contents), - .free = true, }; } @@ -133,16 +126,15 @@ pub fn next(parser: *Parser) (Error || error{UnexpectedEndOfFile})![]const u8 { } pub fn next_or_eof(parser: *Parser) Error!?[]const u8 { - if (parser.file_stack.len == 0) - return null; - - while (true) { + fetch_loop: while (parser.file_stack.len > 0) { const top = &parser.file_stack[parser.file_stack.len - 1]; - const token = if (try fetch_token(&top.tokenizer)) |tok| - tok - else - return null; + const token = (try fetch_token(&top.tokenizer)) orelse { + // we exhausted tokens in the current file, pop the stack and continue + // on lower file + parser.file_stack.len -= 1; + continue :fetch_loop; + }; switch (token.type) { .whitespace, .comment => unreachable, @@ -175,6 +167,8 @@ pub fn next_or_eof(parser: *Parser) Error!?[]const u8 { }, } } + + return null; } fn fetch_token(tok: *Tokenizer) Tokenizer.Error!?Token { @@ -337,7 +331,7 @@ test "parser with variables and include files" { .contents = \\select-disk $DISK \\!include "../parent/kernel.script" - \\ + \\end-of sequence , }); @@ -347,6 +341,8 @@ test "parser with variables and include files" { "copy-file", "./zig-out/bin/kernel.elf", "/BOOT/vzlinuz", + "end-of", + "sequence", }; for (sequence) |item| { diff --git a/src/components/fs/FatFileSystem.zig b/src/components/fs/FatFileSystem.zig index 24350e2..59a4f20 100644 --- a/src/components/fs/FatFileSystem.zig +++ b/src/components/fs/FatFileSystem.zig @@ -1,7 +1,62 @@ const std = @import("std"); const dim = @import("../../dim.zig"); +const common = @import("common.zig"); -pub fn execute(ctx: dim.Context) !void { - _ = ctx; - @panic("fat not implemented yet!"); +const FAT = @This(); + +format_as: FatType, +label: ?[]const u8 = null, +ops: std.ArrayList(common.FsOperation), + +pub fn parse(ctx: dim.Context) !dim.Content { + const fat_type = try ctx.parse_enum(FatType); + + const pf = try ctx.alloc_object(FAT); + pf.* = .{ + .format_as = fat_type, + .ops = .init(ctx.get_arena()), + }; + + try common.parse_ops( + ctx, + "endfat", + Appender{ .fat = pf }, + ); + + return .create_handle(pf, .create(@This(), .{ + .render_fn = render, + })); } + +const Appender = struct { + fat: *FAT, + + pub fn append_common_op(self: @This(), op: common.FsOperation) !void { + try self.fat.ops.append(op); + } + + pub fn parse_custom_op(self: @This(), ctx: dim.Context, str_op: []const u8) !void { + const Op = enum { label }; + const op = std.meta.stringToEnum(Op, str_op) orelse return ctx.report_fatal_error( + "Unknown file system operation '{s}'", + .{str_op}, + ); + switch (op) { + .label => { + self.fat.label = try ctx.parse_string(); + }, + } + } +}; + +fn render(self: *FAT, stream: *dim.BinaryStream) dim.Content.RenderError!void { + _ = self; + _ = stream; +} + +const FatType = enum { + fat12, + fat16, + fat32, + exfat, +}; diff --git a/src/components/fs/common.zig b/src/components/fs/common.zig new file mode 100644 index 0000000..f45f698 --- /dev/null +++ b/src/components/fs/common.zig @@ -0,0 +1,100 @@ +//! +//! This file contains a common base implementation which should be valid for +//! all typical path based file systems. +//! 
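+//! The operations accepted by `parse_ops` below follow a `<verb> <dst> [<src>|<args>]`
+//! shape, matching the `<dst> <src>` convention documented in `data/rootfs.dis`. An
+//! illustrative sequence (paths are examples only, not part of any shipped script):
+//!
+//!   mkdir /boot/EFI/BOOT
+//!   copy-file /boot/EFI/BOOT/BOOTX64.EFI ./zig-out/BOOTX64.EFI
+//!   copy-dir /Windows ./data/rootfs/Windows
+//!   create-file /blob.data 512k fill 0x70
+//!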
+const std = @import("std"); +const dim = @import("../../dim.zig"); + +pub const FsOperation = union(enum) { + copy_file: struct { + path: []const u8, + source: dim.FileName, + }, + + copy_dir: struct { + path: []const u8, + source: dim.FileName, + }, + + make_dir: struct { + path: []const u8, + }, + + create_file: struct { + path: []const u8, + size: u64, + contents: dim.Content, + }, +}; + +fn parse_path(ctx: dim.Context) ![]const u8 { + const path = try ctx.parse_string(); + + if (path.len == 0) { + try ctx.report_nonfatal_error("Path cannot be empty!", .{}); + return path; + } + + if (!std.mem.startsWith(u8, path, "/")) { + try ctx.report_nonfatal_error("Path '{}' did not start with a \"/\"", .{ + std.zig.fmtEscapes(path), + }); + } + + for (path) |c| { + if (c < 0x20 or c == 0x7F or c == '\\') { + try ctx.report_nonfatal_error("Path '{}' contains invalid character 0x{X:0>2}", .{ + std.zig.fmtEscapes(path), + c, + }); + } + } + + _ = std.unicode.Utf8View.init(path) catch |err| { + try ctx.report_nonfatal_error("Path '{}' is not a valid UTF-8 string: {s}", .{ + std.zig.fmtEscapes(path), + @errorName(err), + }); + }; + + return path; +} + +pub fn parse_ops(ctx: dim.Context, end_seq: []const u8, handler: anytype) !void { + while (true) { + const opsel = try ctx.parse_string(); + if (std.mem.eql(u8, opsel, end_seq)) + return; + + if (std.mem.eql(u8, opsel, "mkdir")) { + const path = try parse_path(ctx); + try handler.append_common_op(FsOperation{ + .make_dir = .{ .path = path }, + }); + } else if (std.mem.eql(u8, opsel, "copy-dir")) { + const path = try parse_path(ctx); + const src = try ctx.parse_file_name(); + + try handler.append_common_op(FsOperation{ + .copy_dir = .{ .path = path, .source = src }, + }); + } else if (std.mem.eql(u8, opsel, "copy-file")) { + const path = try parse_path(ctx); + const src = try ctx.parse_file_name(); + + try handler.append_common_op(FsOperation{ + .copy_file = .{ .path = path, .source = src }, + }); + } else if (std.mem.eql(u8, opsel, "create-file")) { + const path = try parse_path(ctx); + const size = try ctx.parse_mem_size(); + const contents = try ctx.parse_content(); + + try handler.append_common_op(FsOperation{ + .create_file = .{ .path = path, .size = size, .contents = contents }, + }); + } else { + try handler.parse_custom_op(ctx, opsel); + } + } +} diff --git a/src/dim.zig b/src/dim.zig index 462adb2..a360362 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -152,7 +152,7 @@ fn fatal(msg: []const u8) noreturn { const content_types: []const struct { []const u8, type } = &.{ .{ "mbr-part", @import("components/part/MbrPartitionTable.zig") }, // .{ "gpt-part", @import("components/part/GptPartitionTable.zig") }, - // .{ "fat", @import("components/fs/FatFileSystem.zig") }, + .{ "vfat", @import("components/fs/FatFileSystem.zig") }, .{ "paste-file", @import("components/PasteFile.zig") }, .{ "empty", @import("components/EmptyData.zig") }, .{ "fill", @import("components/FillData.zig") }, @@ -161,6 +161,10 @@ const content_types: []const struct { []const u8, type } = &.{ pub const Context = struct { env: *Environment, + pub fn get_arena(ctx: Context) std.mem.Allocator { + return ctx.env.arena; + } + pub fn alloc_object(ctx: Context, comptime T: type) error{OutOfMemory}!*T { return try ctx.env.arena.create(T); } @@ -175,7 +179,9 @@ pub const Context = struct { } pub fn parse_string(ctx: Context) Environment.ParseError![]const u8 { - return ctx.env.parser.next(); + const str = try ctx.env.parser.next(); + // std.debug.print("token: '{}'\n", 
.{std.zig.fmtEscapes(str)}); + return str; } pub fn parse_file_name(ctx: Context) Environment.ParseError!FileName { @@ -640,7 +646,7 @@ const DiskSize = enum(u64) { _, pub fn parse(str: []const u8) error{ InvalidSize, Overflow }!DiskSize { - const suffix_scaling: ?u64 = if (std.mem.endsWith(u8, str, "K")) + const suffix_scaling: ?u64 = if (std.mem.endsWith(u8, str, "K") or std.mem.endsWith(u8, str, "k")) KiB else if (std.mem.endsWith(u8, str, "M")) MiB diff --git a/tests/fs/fat32.dis b/tests/fs/fat32.dis new file mode 100644 index 0000000..cc4bf66 --- /dev/null +++ b/tests/fs/fat32.dis @@ -0,0 +1,4 @@ +vfat fat32 + label "Demo FS" + !include ../../data/rootfs.dis +endfat From 46d4e59eba53d4687d86ccc177985329524542ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Thu, 13 Mar 2025 14:25:08 +0100 Subject: [PATCH 17/26] Implements basic FAT formatting, but does not write files yet. --- build.zig | 34 ++--- justfile | 4 +- src/components/fs/FatFileSystem.zig | 189 +++++++++++++++++++++++++++- src/components/fs/common.zig | 18 +++ src/dim.zig | 51 ++++++-- src/mkfs.fat.zig | 136 -------------------- tests/fs/fat12.dis | 4 + tests/fs/fat16.dis | 4 + 8 files changed, 272 insertions(+), 168 deletions(-) delete mode 100644 src/mkfs.fat.zig create mode 100644 tests/fs/fat12.dis create mode 100644 tests/fs/fat16.dis diff --git a/build.zig b/build.zig index d2b1746..6cd4d6b 100644 --- a/build.zig +++ b/build.zig @@ -7,28 +7,18 @@ pub fn build(b: *std.Build) void { const test_step = b.step("test", "Runs the test suite."); - // // Dependency Setup: - // const zfat_dep = b.dependency("zfat", .{ - // // .max_long_name_len = 121, - // .code_page = .us, - // .@"volume-count" = @as(u32, 1), - // .@"sector-size" = @as(u32, 512), - // // .rtc = .dynamic, - // .mkfs = true, - // .exfat = true, - // }); - - // const zfat_mod = zfat_dep.module("zfat"); + // Dependency Setup: + const zfat_dep = b.dependency("zfat", .{ + // .max_long_name_len = 121, + .code_page = .us, + .@"volume-count" = @as(u32, 1), + .@"sector-size" = @as(u32, 512), + // .rtc = .dynamic, + .mkfs = true, + .exfat = true, + }); - // const mkfs_fat = b.addExecutable(.{ - // .name = "mkfs.fat", - // .target = b.graph.host, - // .optimize = .ReleaseSafe, - // .root_source_file = b.path("src/mkfs.fat.zig"), - // }); - // mkfs_fat.root_module.addImport("fat", zfat_mod); - // mkfs_fat.linkLibC(); - // b.installArtifact(mkfs_fat); + const zfat_mod = zfat_dep.module("zfat"); const args_dep = b.dependency("args", .{}); const args_mod = args_dep.module("args"); @@ -37,8 +27,10 @@ pub fn build(b: *std.Build) void { .root_source_file = b.path("src/dim.zig"), .target = target, .optimize = optimize, + .link_libc = true, }); dim_mod.addImport("args", args_mod); + dim_mod.addImport("zfat", zfat_mod); const dim_exe = b.addExecutable(.{ .name = "dim", diff --git a/justfile b/justfile index 5a12bcc..1e779d4 100644 --- a/justfile +++ b/justfile @@ -22,11 +22,13 @@ behaviour-tests: \ (behaviour-test "tests/part/mbr/minimal.dis") \ (behaviour-test "tests/part/mbr/no-part-bootloader.dis") \ (behaviour-test "tests/part/mbr/basic-single-part-sized.dis") \ + (behaviour-test "tests/fs/fat12.dis") \ + (behaviour-test "tests/fs/fat16.dis") \ (behaviour-test "tests/fs/fat32.dis") behaviour-test script: install @mkdir -p {{ join(out, parent_directory(script)) }} - ./zig-out/bin/dim --output {{ join(out, without_extension(script) + ".img") }} --script "{{script}}" --size 30M + ./zig-out/bin/dim --output {{ join(out, 
without_extension(script) + ".img") }} --script "{{script}}" --size 33M # TODO(fqu): sfdisk --json .dim-out/tests/part/mbr/basic-single-part-unsized.img diff --git a/src/components/fs/FatFileSystem.zig b/src/components/fs/FatFileSystem.zig index 59a4f20..d818e15 100644 --- a/src/components/fs/FatFileSystem.zig +++ b/src/components/fs/FatFileSystem.zig @@ -2,6 +2,11 @@ const std = @import("std"); const dim = @import("../../dim.zig"); const common = @import("common.zig"); +const fatfs = @import("zfat"); + +const block_size = 512; +const max_path_len = 8192; // this should be enough + const FAT = @This(); format_as: FatType, @@ -50,13 +55,191 @@ const Appender = struct { }; fn render(self: *FAT, stream: *dim.BinaryStream) dim.Content.RenderError!void { - _ = self; - _ = stream; + var bsd: BinaryStreamDisk = .{ .stream = stream }; + + const min_size, const max_size = self.format_as.get_size_limits(); + + if (stream.length < min_size) { + // TODO(fqu): Report fatal erro! + std.log.err("cannot format {} bytes with {s}: min required size is {}", .{ + @as(dim.DiskSize, @enumFromInt(stream.length)), + @tagName(self.format_as), + @as(dim.DiskSize, @enumFromInt(min_size)), + }); + return; + } + + if (stream.length > max_size) { + // TODO(fqu): Report warning + std.log.warn("will not use all available space: available space is {}, but maximum size for {s} is {}", .{ + @as(dim.DiskSize, @enumFromInt(stream.length)), + @tagName(self.format_as), + @as(dim.DiskSize, @enumFromInt(min_size)), + }); + } + + var filesystem: fatfs.FileSystem = undefined; + + fatfs.disks[0] = &bsd.disk; + defer fatfs.disks[0] = null; + + var workspace: [8192]u8 = undefined; + fatfs.mkfs("0:", .{ + .filesystem = self.format_as.get_zfat_type(), + .fats = .two, + .sector_align = 0, // default/auto + .rootdir_size = 512, // randomly chosen, might need adjustment + .use_partitions = false, + }, &workspace) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.WriteProtected => @panic("bug in zfat"), + error.InvalidParameter => @panic("bug in zfat disk wrapper"), + error.DiskErr => return error.IoError, + error.NotReady => @panic("bug in zfat disk wrapper"), + error.InvalidDrive => @panic("bug in AtomicOps"), + error.MkfsAborted => return error.IoError, + }; + + const ops = self.ops.items; + if (ops.len > 0) { + filesystem.mount("0:", true) catch |err| switch (err) { + error.NotEnabled => @panic("bug in zfat"), + error.DiskErr => return error.IoError, + error.NotReady => @panic("bug in zfat disk wrapper"), + error.InvalidDrive => @panic("bug in AtomicOps"), + error.NoFilesystem => @panic("bug in zfat"), + }; + + const wrapper = AtomicOps{}; + + for (ops) |op| { + try op.execute(wrapper); + } + } } const FatType = enum { fat12, fat16, fat32, - exfat, + // exfat, + + fn get_zfat_type(fat: FatType) fatfs.DiskFormat { + return switch (fat) { + .fat12 => .fat, + .fat16 => .fat, + .fat32 => .fat32, + // .exfat => .exfat, + }; + } + + fn get_size_limits(fat: FatType) struct { u64, u64 } { + // see https://en.wikipedia.org/wiki/Design_of_the_FAT_file_system#Size_limits + return switch (fat) { + .fat12 => .{ 512, 133_824_512 }, // 512 B ... 127 MB + .fat16 => .{ 2_091_520, 2_147_090_432 }, // 2042.5 kB ... 2047 MB + .fat32 => .{ 33_548_800, 1_099_511_578_624 }, // 32762.5 kB ... 
1024 GB + }; + } +}; + +const AtomicOps = struct { + pub fn mkdir(ops: AtomicOps, path: []const u8) !void { + _ = ops; + + var path_buffer: [max_path_len:0]u8 = undefined; + var fba: std.heap.FixedBufferAllocator = .init(&path_buffer); + + const joined = try std.mem.concatWithSentinel(fba.allocator(), u8, &.{ "0:/", path }, 0); + fatfs.mkdir(joined) catch |err| switch (err) { + error.Exist => {}, // this is good + else => |e| return e, + }; + } + + pub fn mkfile(ops: AtomicOps, path: []const u8, host_file: std.fs.File) !void { + _ = ops; + + var path_buffer: [max_path_len:0]u8 = undefined; + if (path.len > path_buffer.len) + return error.InvalidPath; + @memcpy(path_buffer[0..path.len], path); + path_buffer[path.len] = 0; + + const path_z = path_buffer[0..path.len :0]; + + const stat = try host_file.stat(); + + const size = std.math.cast(u32, stat.size) orelse return error.FileTooBig; + + _ = size; + + var fs_file = try fatfs.File.create(path_z); + defer fs_file.close(); + + var fifo: std.fifo.LinearFifo(u8, .{ .Static = 8192 }) = .init(); + try fifo.pump( + host_file.reader(), + fs_file.writer(), + ); + } +}; + +const BinaryStreamDisk = struct { + disk: fatfs.Disk = .{ + .getStatusFn = disk_getStatus, + .initializeFn = disk_initialize, + .readFn = disk_read, + .writeFn = disk_write, + .ioctlFn = disk_ioctl, + }, + stream: *dim.BinaryStream, + + fn disk_getStatus(intf: *fatfs.Disk) fatfs.Disk.Status { + _ = intf; + return .{ + .initialized = true, + .disk_present = true, + .write_protected = false, + }; + } + + fn disk_initialize(intf: *fatfs.Disk) fatfs.Disk.Error!fatfs.Disk.Status { + return disk_getStatus(intf); + } + + fn disk_read(intf: *fatfs.Disk, buff: [*]u8, sector: fatfs.LBA, count: c_uint) fatfs.Disk.Error!void { + const bsd: *BinaryStreamDisk = @fieldParentPtr("disk", intf); + + bsd.stream.read(block_size * sector, buff[0 .. count * block_size]) catch return error.IoError; + } + + fn disk_write(intf: *fatfs.Disk, buff: [*]const u8, sector: fatfs.LBA, count: c_uint) fatfs.Disk.Error!void { + const bsd: *BinaryStreamDisk = @fieldParentPtr("disk", intf); + + bsd.stream.write(block_size * sector, buff[0 .. 
count * block_size]) catch return error.IoError; + } + + fn disk_ioctl(intf: *fatfs.Disk, cmd: fatfs.IoCtl, buff: [*]u8) fatfs.Disk.Error!void { + const bsd: *BinaryStreamDisk = @fieldParentPtr("disk", intf); + + switch (cmd) { + .sync => {}, + + .get_sector_count => { + const size: *fatfs.LBA = @ptrCast(@alignCast(buff)); + size.* = @intCast(bsd.stream.length / block_size); + }, + .get_sector_size => { + const size: *fatfs.WORD = @ptrCast(@alignCast(buff)); + size.* = block_size; + }, + .get_block_size => { + const size: *fatfs.DWORD = @ptrCast(@alignCast(buff)); + size.* = 1; + }, + + else => return error.InvalidParameter, + } + } }; diff --git a/src/components/fs/common.zig b/src/components/fs/common.zig index f45f698..9982033 100644 --- a/src/components/fs/common.zig +++ b/src/components/fs/common.zig @@ -25,6 +25,24 @@ pub const FsOperation = union(enum) { size: u64, contents: dim.Content, }, + + pub fn execute(op: FsOperation, executor: anytype) !void { + _ = executor; + switch (op) { + .copy_file => |data| { + _ = data; + }, + .copy_dir => |data| { + _ = data; + }, + .make_dir => |data| { + _ = data; + }, + .create_file => |data| { + _ = data; + }, + } + } }; fn parse_path(ctx: dim.Context) ![]const u8 { diff --git a/src/dim.zig b/src/dim.zig index a360362..f9f8b89 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -7,6 +7,11 @@ const Tokenizer = @import("Tokenizer.zig"); const Parser = @import("Parser.zig"); const args = @import("args"); +comptime { + // Ensure zfat is linked to prevent compiler errors! + _ = @import("zfat"); +} + const max_script_size = 10 * DiskSize.MiB; const Options = struct { @@ -128,16 +133,14 @@ pub fn main() !u8 { } { - var output_file = try current_dir.atomicFile(output_path, .{}); - defer output_file.deinit(); + var output_file = try current_dir.createFile(output_path, .{ .read = true }); + defer output_file.close(); - try output_file.file.setEndPos(size_limit); + try output_file.setEndPos(size_limit); - var stream: BinaryStream = .init_file(output_file.file, size_limit); + var stream: BinaryStream = .init_file(output_file, size_limit); try root_content.render(&stream); - - try output_file.finish(); } return 0; @@ -345,6 +348,7 @@ pub const Content = struct { pub const RenderError = FileName.OpenError || FileHandle.ReadError || BinaryStream.WriteError || error{ ConfigurationError, OutOfBounds, + OutOfMemory, }; pub const GuessError = FileName.GetSizeError; @@ -517,6 +521,7 @@ pub const FileHandle = struct { pub const BinaryStream = struct { pub const WriteError = error{ Overflow, IoError }; + pub const ReadError = error{ Overflow, IoError }; pub const Writer = std.io.Writer(*BinaryStream, WriteError, write_some); backing: Backing, @@ -569,6 +574,38 @@ pub const BinaryStream = struct { }; } + pub fn read(bs: *BinaryStream, offset: u64, data: []u8) ReadError!void { + const end_pos = offset + data.len; + if (end_pos > bs.length) + return error.Overflow; + + switch (bs.backing) { + .buffer => |ptr| @memcpy(data, ptr[@intCast(offset)..][0..data.len]), + .file => |state| { + state.file.seekTo(state.base + offset) catch return error.IoError; + state.file.reader().readNoEof(data) catch |err| switch (err) { + error.InputOutput, + error.AccessDenied, + error.BrokenPipe, + error.SystemResources, + error.OperationAborted, + error.LockViolation, + error.WouldBlock, + error.ConnectionResetByPeer, + error.ProcessNotFound, + error.Unexpected, + error.IsDir, + error.ConnectionTimedOut, + error.NotOpenForReading, + error.SocketNotConnected, + error.Canceled, + 
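+                    // Assumption: reaching end-of-stream here means the backing file is
+                    // shorter than the declared `length`, so it is reported as an I/O
+                    // error rather than as an out-of-bounds read.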
error.EndOfStream, + => return error.IoError, + }; + }, + } + } + pub fn write(bs: *BinaryStream, offset: u64, data: []const u8) WriteError!void { const end_pos = offset + data.len; if (end_pos > bs.length) @@ -636,7 +673,7 @@ test { _ = Parser; } -const DiskSize = enum(u64) { +pub const DiskSize = enum(u64) { const KiB = 1024; const MiB = 1024 * 1024; const GiB = 1024 * 1024 * 1024; diff --git a/src/mkfs.fat.zig b/src/mkfs.fat.zig deleted file mode 100644 index 5f99349..0000000 --- a/src/mkfs.fat.zig +++ /dev/null @@ -1,136 +0,0 @@ -const std = @import("std"); -const fatfs = @import("fat"); -const shared = @import("shared.zig"); - -const App = shared.App(@This()); - -pub const main = App.main; - -pub const std_options: std.Options = .{ - .log_scope_levels = &.{ - .{ .scope = .fatfs, .level = .warn }, - }, -}; - -var fat_disk: fatfs.Disk = .{ - .getStatusFn = disk_getStatus, - .initializeFn = disk_initialize, - .readFn = disk_read, - .writeFn = disk_write, - .ioctlFn = disk_ioctl, -}; - -var filesystem_format: fatfs.DiskFormat = undefined; - -var filesystem: fatfs.FileSystem = undefined; - -const format_mapping = std.StaticStringMap(fatfs.DiskFormat).initComptime(&.{ - .{ "fat12", .fat }, - .{ "fat16", .fat }, - .{ "fat32", .fat32 }, - .{ "exfat", .exfat }, -}); - -pub fn init(file_system: []const u8) !void { - filesystem_format = format_mapping.get(file_system) orelse return error.InvalidFilesystem; - fatfs.disks[0] = &fat_disk; -} - -pub fn format() !void { - var workspace: [8192]u8 = undefined; - try fatfs.mkfs("0:", .{ - .filesystem = filesystem_format, - .fats = .two, - .sector_align = 0, // default/auto - .rootdir_size = 512, // randomly chosen, might need adjustment - .use_partitions = false, - }, &workspace); -} - -pub fn mount() !void { - try filesystem.mount("0:", true); -} - -pub fn mkdir(path: []const u8) !void { - const joined = try std.mem.concatWithSentinel(App.allocator, u8, &.{ "0:/", path }, 0); - fatfs.mkdir(joined) catch |err| switch (err) { - error.Exist => {}, // this is good - else => |e| return e, - }; -} - -pub fn mkfile(path: []const u8, host_file: std.fs.File) !void { - const path_z = try App.allocator.dupeZ(u8, path); - defer App.allocator.free(path_z); - - const stat = try host_file.stat(); - - const size = std.math.cast(u32, stat.size) orelse return error.FileTooBig; - - _ = size; - - var fs_file = try fatfs.File.create(path_z); - defer fs_file.close(); - - var fifo = std.fifo.LinearFifo(u8, .{ .Static = 8192 }).init(); - try fifo.pump( - host_file.reader(), - fs_file.writer(), - ); -} - -fn disk_getStatus(intf: *fatfs.Disk) fatfs.Disk.Status { - _ = intf; - return .{ - .initialized = true, - .disk_present = true, - .write_protected = false, - }; -} - -fn disk_initialize(intf: *fatfs.Disk) fatfs.Disk.Error!fatfs.Disk.Status { - return disk_getStatus(intf); -} - -fn disk_read(intf: *fatfs.Disk, buff: [*]u8, sector: fatfs.LBA, count: c_uint) fatfs.Disk.Error!void { - _ = intf; - - const blocks = std.mem.bytesAsSlice(shared.Block, buff[0 .. count * shared.BlockDevice.block_size]); - for (blocks, 0..) 
|*block, i| { - block.* = App.device.read(sector + i) catch return error.IoError; - } -} - -fn disk_write(intf: *fatfs.Disk, buff: [*]const u8, sector: fatfs.LBA, count: c_uint) fatfs.Disk.Error!void { - _ = intf; - - const block_ptr = @as([*]const [512]u8, @ptrCast(buff)); - - var i: usize = 0; - while (i < count) : (i += 1) { - App.device.write(sector + i, block_ptr[i]) catch return error.IoError; - } -} - -fn disk_ioctl(intf: *fatfs.Disk, cmd: fatfs.IoCtl, buff: [*]u8) fatfs.Disk.Error!void { - _ = intf; - - switch (cmd) { - .sync => App.device.file.sync() catch return error.IoError, - - .get_sector_count => { - const size: *fatfs.LBA = @ptrCast(@alignCast(buff)); - size.* = @intCast(App.device.count); - }, - .get_sector_size => { - const size: *fatfs.WORD = @ptrCast(@alignCast(buff)); - size.* = 512; - }, - .get_block_size => { - const size: *fatfs.DWORD = @ptrCast(@alignCast(buff)); - size.* = 1; - }, - - else => return error.InvalidParameter, - } -} diff --git a/tests/fs/fat12.dis b/tests/fs/fat12.dis new file mode 100644 index 0000000..4b0352e --- /dev/null +++ b/tests/fs/fat12.dis @@ -0,0 +1,4 @@ +vfat fat12 + label "Demo FS" + !include ../../data/rootfs.dis +endfat diff --git a/tests/fs/fat16.dis b/tests/fs/fat16.dis new file mode 100644 index 0000000..84018df --- /dev/null +++ b/tests/fs/fat16.dis @@ -0,0 +1,4 @@ +vfat fat16 + label "Demo FS" + !include ../../data/rootfs.dis +endfat From 0501c0fbda8a41f838dea0364b515e5602a7310d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Thu, 13 Mar 2025 17:36:19 +0100 Subject: [PATCH 18/26] Adds more options to VFAT driver --- src/components/fs/FatFileSystem.zig | 55 +++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 14 deletions(-) diff --git a/src/components/fs/FatFileSystem.zig b/src/components/fs/FatFileSystem.zig index d818e15..ee7d899 100644 --- a/src/components/fs/FatFileSystem.zig +++ b/src/components/fs/FatFileSystem.zig @@ -11,7 +11,11 @@ const FAT = @This(); format_as: FatType, label: ?[]const u8 = null, +fats: ?fatfs.FatTables = null, +rootdir_size: ?c_uint = null, ops: std.ArrayList(common.FsOperation), +sector_align: ?c_uint = null, +cluster_size: ?u32 = null, pub fn parse(ctx: dim.Context) !dim.Content { const fat_type = try ctx.parse_enum(FatType); @@ -22,11 +26,14 @@ pub fn parse(ctx: dim.Context) !dim.Content { .ops = .init(ctx.get_arena()), }; - try common.parse_ops( - ctx, - "endfat", - Appender{ .fat = pf }, - ); + var appender: Appender = .{ + .fat = pf, + .updater = .init(ctx, pf), + }; + + try common.parse_ops(ctx, "endfat", &appender); + + try appender.updater.validate(); return .create_handle(pf, .create(@This(), .{ .render_fn = render, @@ -35,21 +42,40 @@ pub fn parse(ctx: dim.Context) !dim.Content { const Appender = struct { fat: *FAT, + updater: dim.FieldUpdater(FAT, &.{ + .fats, + .label, + .rootdir_size, + .sector_align, + .cluster_size, + + // cannot be accessed: + .format_as, + .ops, + }), pub fn append_common_op(self: @This(), op: common.FsOperation) !void { try self.fat.ops.append(op); } - pub fn parse_custom_op(self: @This(), ctx: dim.Context, str_op: []const u8) !void { - const Op = enum { label }; + pub fn parse_custom_op(self: *@This(), ctx: dim.Context, str_op: []const u8) !void { + const Op = enum { + label, + fats, + @"root-size", + @"sector-align", + @"cluster-size", + }; const op = std.meta.stringToEnum(Op, str_op) orelse return ctx.report_fatal_error( "Unknown file system operation '{s}'", .{str_op}, ); switch (op) { - .label 
=> { - self.fat.label = try ctx.parse_string(); - }, + .label => try self.updater.set(.label, try ctx.parse_string()), + .fats => try self.updater.set(.fats, try ctx.parse_enum(fatfs.FatTables)), + .@"root-size" => try self.updater.set(.rootdir_size, try ctx.parse_integer(c_uint, 0)), + .@"sector-align" => try self.updater.set(.sector_align, try ctx.parse_integer(c_uint, 0)), + .@"cluster-size" => try self.updater.set(.cluster_size, try ctx.parse_integer(u32, 0)), } } }; @@ -86,10 +112,11 @@ fn render(self: *FAT, stream: *dim.BinaryStream) dim.Content.RenderError!void { var workspace: [8192]u8 = undefined; fatfs.mkfs("0:", .{ .filesystem = self.format_as.get_zfat_type(), - .fats = .two, - .sector_align = 0, // default/auto - .rootdir_size = 512, // randomly chosen, might need adjustment - .use_partitions = false, + .fats = self.fats orelse .two, + .sector_align = self.sector_align orelse 0, // default/auto + .cluster_size = self.cluster_size orelse 0, + .rootdir_size = self.rootdir_size orelse 512, // randomly chosen, might need adjustment + .use_partitions = false, // we have other means for this }, &workspace) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.WriteProtected => @panic("bug in zfat"), From 155f4bf091bbc07bf26c9873e1d39443f36b30ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Thu, 13 Mar 2025 17:51:22 +0100 Subject: [PATCH 19/26] Adds label support. --- build.zig | 1 + src/components/fs/FatFileSystem.zig | 37 ++++++++++++++++++++--------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/build.zig b/build.zig index 6cd4d6b..ce5a25b 100644 --- a/build.zig +++ b/build.zig @@ -16,6 +16,7 @@ pub fn build(b: *std.Build) void { // .rtc = .dynamic, .mkfs = true, .exfat = true, + .label = true, }); const zfat_mod = zfat_dep.module("zfat"); diff --git a/src/components/fs/FatFileSystem.zig b/src/components/fs/FatFileSystem.zig index ee7d899..2f32c9e 100644 --- a/src/components/fs/FatFileSystem.zig +++ b/src/components/fs/FatFileSystem.zig @@ -6,6 +6,7 @@ const fatfs = @import("zfat"); const block_size = 512; const max_path_len = 8192; // this should be enough +const max_label_len = 11; // see http://elm-chan.org/fsw/ff/doc/setlabel.html const FAT = @This(); @@ -128,21 +129,35 @@ fn render(self: *FAT, stream: *dim.BinaryStream) dim.Content.RenderError!void { }; const ops = self.ops.items; - if (ops.len > 0) { - filesystem.mount("0:", true) catch |err| switch (err) { - error.NotEnabled => @panic("bug in zfat"), - error.DiskErr => return error.IoError, - error.NotReady => @panic("bug in zfat disk wrapper"), - error.InvalidDrive => @panic("bug in AtomicOps"), - error.NoFilesystem => @panic("bug in zfat"), - }; - const wrapper = AtomicOps{}; + filesystem.mount("0:", true) catch |err| switch (err) { + error.NotEnabled => @panic("bug in zfat"), + error.DiskErr => return error.IoError, + error.NotReady => @panic("bug in zfat disk wrapper"), + error.InvalidDrive => @panic("bug in AtomicOps"), + error.NoFilesystem => @panic("bug in zfat"), + }; - for (ops) |op| { - try op.execute(wrapper); + if (self.label) |label| { + if (label.len <= max_label_len) { + var label_buffer: [max_label_len + 3:0]u8 = undefined; + const buf = std.fmt.bufPrintZ(&label_buffer, "0:{s}", .{label}) catch @panic("buffer too small"); + + _ = fatfs.api.setlabel(buf.ptr); + } else { + std.log.err("label \"{}\" is {} characters long, but only up to {} are permitted.", .{ + std.zig.fmtEscapes(label), + label.len, + 
max_label_len, + }); } } + + const wrapper = AtomicOps{}; + + for (ops) |op| { + try op.execute(wrapper); + } } const FatType = enum { From 1f829bae1bfa5d939ebf183a17d974b00e44f804 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Thu, 13 Mar 2025 20:33:57 +0100 Subject: [PATCH 20/26] Implements filesystem creation for VFAT with copying of files --- data/rootfs.dis | 4 +- src/components/fs/FatFileSystem.zig | 61 ++++++-- src/components/fs/common.zig | 197 +++++++++++++++++++++--- src/dim.zig | 40 ++++- src/shared.zig | 230 ---------------------------- 5 files changed, 263 insertions(+), 269 deletions(-) delete mode 100644 src/shared.zig diff --git a/data/rootfs.dis b/data/rootfs.dis index 3a06dbf..8000744 100644 --- a/data/rootfs.dis +++ b/data/rootfs.dis @@ -3,8 +3,8 @@ mkdir /boot/EFI/nixos/.extra-files/ mkdir /Users/xq/ # copy-XXX uses <dst> <src> syntax as it's consistent with other paths -copy-dir /Windows ./dummy/Windows -copy-file /Users/xq/README.md ./dummy/README.md +copy-dir /Windows ./rootfs/Windows +copy-file /Users/xq/README.md ./rootfs/README.md # create-file <path> <size> <contents> creates nested data create-file /Users/xq/blob.data 512k fill 0x70 diff --git a/src/components/fs/FatFileSystem.zig b/src/components/fs/FatFileSystem.zig index 2f32c9e..5b5de50 100644 --- a/src/components/fs/FatFileSystem.zig +++ b/src/components/fs/FatFileSystem.zig @@ -143,7 +143,9 @@ fn render(self: *FAT, stream: *dim.BinaryStream) dim.Content.RenderError!void { var label_buffer: [max_label_len + 3:0]u8 = undefined; const buf = std.fmt.bufPrintZ(&label_buffer, "0:{s}", .{label}) catch @panic("buffer too small"); - _ = fatfs.api.setlabel(buf.ptr); + if (fatfs.api.setlabel(buf.ptr) != 0) { + return error.IoError; + } } else { std.log.err("label \"{}\" is {} characters long, but only up to {} are permitted.", .{ std.zig.fmtEscapes(label), @@ -186,7 +188,7 @@ const FatType = enum { }; const AtomicOps = struct { - pub fn mkdir(ops: AtomicOps, path: []const u8) !void { + pub fn mkdir(ops: AtomicOps, path: []const u8) dim.Content.RenderError!void { _ = ops; var path_buffer: [max_path_len:0]u8 = undefined; @@ -195,11 +197,22 @@ const AtomicOps = struct { const joined = try std.mem.concatWithSentinel(fba.allocator(), u8, &.{ "0:/", path }, 0); fatfs.mkdir(joined) catch |err| switch (err) { error.Exist => {}, // this is good - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.Timeout => @panic("implementation bug in fatfs glue"), + error.InvalidName => return error.ConfigurationError, + error.WriteProtected => @panic("implementation bug in fatfs glue"), + error.DiskErr => return error.IoError, + error.NotReady => @panic("implementation bug in fatfs glue"), + error.InvalidDrive => @panic("implementation bug in fatfs glue"), + error.NotEnabled => @panic("implementation bug in fatfs glue"), + error.NoFilesystem => @panic("implementation bug in fatfs glue"), + error.IntErr => return error.IoError, + error.NoPath => @panic("implementation bug in fatfs glue"), + error.Denied => @panic("implementation bug in fatfs glue"), }; } - pub fn mkfile(ops: AtomicOps, path: []const u8, host_file: std.fs.File) !void { + pub fn mkfile(ops: AtomicOps, path: []const u8, reader: anytype) dim.Content.RenderError!void { _ = ops; var path_buffer: [max_path_len:0]u8 = undefined; @@ -210,20 +223,40 @@ const AtomicOps = struct { const path_z = path_buffer[0..path.len :0]; - const stat = try host_file.stat(); - - const size = std.math.cast(u32, 
stat.size) orelse return error.FileTooBig; - - _ = size; - - var fs_file = try fatfs.File.create(path_z); + var fs_file = fatfs.File.create(path_z) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.Timeout => @panic("implementation bug in fatfs glue"), + error.InvalidName => return error.ConfigurationError, + error.WriteProtected => @panic("implementation bug in fatfs glue"), + error.DiskErr => return error.IoError, + error.NotReady => @panic("implementation bug in fatfs glue"), + error.InvalidDrive => @panic("implementation bug in fatfs glue"), + error.NotEnabled => @panic("implementation bug in fatfs glue"), + error.NoFilesystem => @panic("implementation bug in fatfs glue"), + error.IntErr => return error.IoError, + error.NoFile => @panic("implementation bug in fatfs glue"), + error.NoPath => @panic("implementation bug in fatfs glue"), + error.Denied => @panic("implementation bug in fatfs glue"), + error.Exist => @panic("implementation bug in fatfs glue"), + error.InvalidObject => @panic("implementation bug in fatfs glue"), + error.Locked => @panic("implementation bug in fatfs glue"), + error.TooManyOpenFiles => @panic("implementation bug in fatfs glue"), + }; defer fs_file.close(); var fifo: std.fifo.LinearFifo(u8, .{ .Static = 8192 }) = .init(); - try fifo.pump( - host_file.reader(), + fifo.pump( + reader, fs_file.writer(), - ); + ) catch |err| switch (@as(dim.FileHandle.ReadError || fatfs.File.ReadError.Error, err)) { + error.Overflow => return error.IoError, + error.ReadFileFailed => return error.IoError, + error.Timeout => @panic("implementation bug in fatfs glue"), + error.DiskErr => return error.IoError, + error.IntErr => return error.IoError, + error.Denied => @panic("implementation bug in fatfs glue"), + error.InvalidObject => @panic("implementation bug in fatfs glue"), + }; } }; diff --git a/src/components/fs/common.zig b/src/components/fs/common.zig index 9982033..a3421e9 100644 --- a/src/components/fs/common.zig +++ b/src/components/fs/common.zig @@ -7,50 +7,181 @@ const dim = @import("../../dim.zig"); pub const FsOperation = union(enum) { copy_file: struct { - path: []const u8, + path: [:0]const u8, source: dim.FileName, }, copy_dir: struct { - path: []const u8, + path: [:0]const u8, source: dim.FileName, }, make_dir: struct { - path: []const u8, + path: [:0]const u8, }, create_file: struct { - path: []const u8, + path: [:0]const u8, size: u64, contents: dim.Content, }, pub fn execute(op: FsOperation, executor: anytype) !void { - _ = executor; - switch (op) { - .copy_file => |data| { - _ = data; - }, - .copy_dir => |data| { - _ = data; - }, - .make_dir => |data| { - _ = data; - }, - .create_file => |data| { - _ = data; - }, - } + const exec: Executor(@TypeOf(executor)) = .init(executor); + + try exec.execute(op); } }; -fn parse_path(ctx: dim.Context) ![]const u8 { +fn Executor(comptime T: type) type { + return struct { + const Exec = @This(); + + inner: T, + + fn init(wrapped: T) Exec { + return .{ .inner = wrapped }; + } + + fn execute(exec: Exec, op: FsOperation) dim.Content.RenderError!void { + switch (op) { + .make_dir => |data| { + try exec.recursive_mkdir(data.path); + }, + + .copy_file => |data| { + var handle = data.source.open() catch |err| switch (err) { + error.FileNotFound => return, // open() already reporeted the error + else => |e| return e, + }; + defer handle.close(); + + try exec.add_file(data.path, handle.reader()); + }, + .copy_dir => |data| { + var iter_dir = data.source.open_dir() catch |err| switch (err) { + 
error.FileNotFound => return, // open() already reporeted the error + else => |e| return e, + }; + defer iter_dir.close(); + + var walker_memory: [16384]u8 = undefined; + var temp_allocator: std.heap.FixedBufferAllocator = .init(&walker_memory); + + var path_memory: [8192]u8 = undefined; + + var walker = try iter_dir.walk(temp_allocator.allocator()); + defer walker.deinit(); + + while (walker.next() catch |err| return walk_err(err)) |entry| { + const path = std.fmt.bufPrintZ(&path_memory, "{s}/{s}", .{ + data.path, + entry.path, + }) catch @panic("buffer too small!"); + + // std.log.debug("- {s}", .{path_buffer.items}); + + switch (entry.kind) { + .file => { + const fname: dim.FileName = .{ + .root_dir = entry.dir, + .rel_path = entry.basename, + }; + + var file = try fname.open(); + defer file.close(); + + try exec.add_file(path, file.reader()); + }, + + .directory => { + try exec.recursive_mkdir(path); + }, + + else => { + var realpath_buffer: [std.fs.max_path_bytes]u8 = undefined; + std.log.warn("cannot copy file {!s}: {s} is not a supported file type!", .{ + entry.dir.realpath(entry.path, &realpath_buffer), + @tagName(entry.kind), + }); + }, + } + } + }, + + .create_file => |data| { + const buffer = try std.heap.page_allocator.alloc(u8, data.size); + defer std.heap.page_allocator.free(buffer); + + var bs: dim.BinaryStream = .init_buffer(buffer); + + try data.contents.render(&bs); + + var fbs: std.io.FixedBufferStream([]u8) = .{ .buffer = buffer, .pos = 0 }; + + try exec.add_file(data.path, fbs.reader()); + }, + } + } + + fn add_file(exec: Exec, path: [:0]const u8, reader: anytype) !void { + if (std.fs.path.dirnamePosix(path)) |dir| { + try exec.recursive_mkdir(dir); + } + + try exec.inner_mkfile(path, reader); + } + + fn recursive_mkdir(exec: Exec, path: []const u8) !void { + var i: usize = 0; + + while (std.mem.indexOfScalarPos(u8, path, i, '/')) |index| { + try exec.inner_mkdir(path[0..index]); + i = index + 1; + } + + try exec.inner_mkdir(path); + } + + fn inner_mkfile(exec: Exec, path: []const u8, reader: anytype) dim.Content.RenderError!void { + try exec.inner.mkfile(path, reader); + } + + fn inner_mkdir(exec: Exec, path: []const u8) dim.Content.RenderError!void { + try exec.inner.mkdir(path); + } + + fn walk_err(err: (std.fs.Dir.OpenError || std.mem.Allocator.Error)) dim.Content.RenderError { + return switch (err) { + error.InvalidUtf8 => error.InvalidPath, + error.InvalidWtf8 => error.InvalidPath, + error.BadPathName => error.InvalidPath, + error.NameTooLong => error.InvalidPath, + + error.OutOfMemory => error.OutOfMemory, + error.FileNotFound => error.FileNotFound, + + error.DeviceBusy => error.IoError, + error.AccessDenied => error.IoError, + error.SystemResources => error.IoError, + error.NoDevice => error.IoError, + error.Unexpected => error.IoError, + error.NetworkNotFound => error.IoError, + error.SymLinkLoop => error.IoError, + error.ProcessFdQuotaExceeded => error.IoError, + error.SystemFdQuotaExceeded => error.IoError, + error.NotDir => error.IoError, + }; + } + }; +} + +fn parse_path(ctx: dim.Context) ![:0]const u8 { const path = try ctx.parse_string(); if (path.len == 0) { try ctx.report_nonfatal_error("Path cannot be empty!", .{}); - return path; + return ""; } if (!std.mem.startsWith(u8, path, "/")) { @@ -75,7 +206,7 @@ fn parse_path(ctx: dim.Context) ![]const u8 { }); }; - return path; + return try normalize(ctx.get_arena(), path); } pub fn parse_ops(ctx: dim.Context, end_seq: []const u8, handler: anytype) !void { @@ -116,3 +247,25 @@ pub fn parse_ops(ctx: 
dim.Context, end_seq: []const u8, handler: anytype) !void } } } + +fn normalize(allocator: std.mem.Allocator, src_path: []const u8) ![:0]const u8 { + var list = std.ArrayList([]const u8).init(allocator); + defer list.deinit(); + + var parts = std.mem.tokenizeAny(u8, src_path, "\\/"); + + while (parts.next()) |part| { + if (std.mem.eql(u8, part, ".")) { + // "cd same" is a no-op, we can remove it + continue; + } else if (std.mem.eql(u8, part, "..")) { + // "cd up" is basically just removing the last pushed part + _ = list.pop(); + } else { + // this is an actual "descend" + try list.append(part); + } + } + + return try std.mem.joinZ(allocator, "/", list.items); +} diff --git a/src/dim.zig b/src/dim.zig index f9f8b89..dea5ec1 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -398,7 +398,14 @@ pub const FileName = struct { pub fn open(name: FileName) OpenError!FileHandle { const file = name.root_dir.openFile(name.rel_path, .{}) catch |err| switch (err) { - error.FileNotFound => return error.FileNotFound, + error.FileNotFound => { + var buffer: [std.fs.max_path_bytes]u8 = undefined; + std.log.err("failed to open \"{}/{}\": not found", .{ + std.zig.fmtEscapes(name.root_dir.realpath(".", &buffer) catch |e| @errorName(e)), + std.zig.fmtEscapes(name.rel_path), + }); + return error.FileNotFound; + }, error.NameTooLong, error.InvalidWtf8, @@ -431,6 +438,37 @@ pub const FileName = struct { return .{ .file = file }; } + pub fn open_dir(name: FileName) OpenError!std.fs.Dir { + return name.root_dir.openDir(name.rel_path, .{ .iterate = true }) catch |err| switch (err) { + error.FileNotFound => { + var buffer: [std.fs.max_path_bytes]u8 = undefined; + std.log.err("failed to open \"{}/{}\": not found", .{ + std.zig.fmtEscapes(name.root_dir.realpath(".", &buffer) catch |e| @errorName(e)), + std.zig.fmtEscapes(name.rel_path), + }); + return error.FileNotFound; + }, + + error.NameTooLong, + error.InvalidWtf8, + error.BadPathName, + error.InvalidUtf8, + => return error.InvalidPath, + + error.DeviceBusy, + error.AccessDenied, + error.SystemResources, + error.NoDevice, + error.Unexpected, + error.NetworkNotFound, + error.SymLinkLoop, + error.ProcessFdQuotaExceeded, + error.SystemFdQuotaExceeded, + error.NotDir, + => return error.IoError, + }; + } + pub const GetSizeError = error{ FileNotFound, InvalidPath, IoError }; pub fn get_size(name: FileName) GetSizeError!u64 { const stat = name.root_dir.statFile(name.rel_path) catch |err| switch (err) { diff --git a/src/shared.zig b/src/shared.zig deleted file mode 100644 index ef08f5d..0000000 --- a/src/shared.zig +++ /dev/null @@ -1,230 +0,0 @@ -const std = @import("std"); - -// usage: mkfs.<tool> <image> <base> <length> <filesystem> <ops...> -// <image> is a path to the image file -// <base> is the byte base of the file system -// <length> is the byte length of the file system -// <filesystem> is the file system that should be used to format -// <ops...> is a list of operations that should be performed on the file system: -// - format Formats the disk image. -// - mount Mounts the file system, must be before all following: -// - mkdir;<dst> Creates directory <dst> and all necessary parents. -// - file;<src>;<dst> Copy <src> to path <dst>. If <dst> exists, it will be overwritten. -// - dir;<src>;<dst> Copy <src> recursively into <dst>. If <dst> exists, they will be merged. -// -// <dst> paths are always rooted, even if they don't start with a /, and always use / as a path separator. 
-// - -pub fn App(comptime Context: type) type { - return struct { - pub var allocator: std.mem.Allocator = undefined; - pub var device: BlockDevice = undefined; - - pub fn main() !u8 { - var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); - allocator = arena.allocator(); - - const argv = try std.process.argsAlloc(allocator); - - if (argv.len <= 4) - return mistake("invalid usage", .{}); - - const image_file_path = argv[1]; - const byte_base = try std.fmt.parseInt(u64, argv[2], 0); - const byte_len = try std.fmt.parseInt(u64, argv[3], 0); - const file_system = argv[4]; - - const command_list = argv[5..]; - - if ((byte_base % BlockDevice.block_size) != 0) { - std.log.warn("offset is not a multiple of {}", .{BlockDevice.block_size}); - } - if ((byte_len % BlockDevice.block_size) != 0) { - std.log.warn("length is not a multiple of {}", .{BlockDevice.block_size}); - } - - if (command_list.len == 0) - return mistake("no commands.", .{}); - - var image_file = try std.fs.cwd().openFile(image_file_path, .{ - .mode = .read_write, - }); - defer image_file.close(); - - const stat = try image_file.stat(); - - if (byte_base + byte_len > stat.size) - return mistake("invalid offsets.", .{}); - - device = .{ - .file = &image_file, - .base = byte_base, - .count = byte_len / BlockDevice.block_size, - }; - - var path_buffer = std.ArrayList(u8).init(allocator); - defer path_buffer.deinit(); - - try path_buffer.ensureTotalCapacity(8192); - - try Context.init(file_system); - - for (command_list) |command_sequence| { - var cmd_iter = std.mem.splitScalar(u8, command_sequence, ';'); - - const command_str = cmd_iter.next() orelse return mistake("bad command", .{}); - - const command = std.meta.stringToEnum(Command, command_str) orelse return mistake("bad command: {s}", .{command_str}); - - switch (command) { - .format => { - try Context.format(); - }, - .mount => { - try Context.mount(); - }, - .mkdir => { - const dir = try normalize(cmd_iter.next() orelse return mistake("mkdir;<dst> is missing it's <dst> path!", .{})); - - // std.log.info("mkdir(\"{}\")", .{std.zig.fmtEscapes(dir)}); - - try recursiveMkDir(dir); - }, - .file => { - const src = cmd_iter.next() orelse return mistake("file;<src>;<dst> is missing it's <src> path!", .{}); - const dst = try normalize(cmd_iter.next() orelse return mistake("file;<src>;<dst> is missing it's <dst> path!", .{})); - - // std.log.info("file(\"{}\", \"{}\")", .{ std.zig.fmtEscapes(src), std.zig.fmtEscapes(dst) }); - - var file = try std.fs.cwd().openFile(src, .{}); - defer file.close(); - - try addFile(file, dst); - }, - .dir => { - const src = cmd_iter.next() orelse return mistake("dir;<src>;<dst> is missing it's <src> path!", .{}); - const dst = try normalize(cmd_iter.next() orelse return mistake("dir;<src>;<dst> is missing it's <dst> path!", .{})); - - var iter_dir = try std.fs.cwd().openDir(src, .{ .iterate = true }); - defer iter_dir.close(); - - var walker = try iter_dir.walk(allocator); - defer walker.deinit(); - - while (try walker.next()) |entry| { - path_buffer.shrinkRetainingCapacity(0); - try path_buffer.appendSlice(dst); - try path_buffer.appendSlice("/"); - try path_buffer.appendSlice(entry.path); - - const fs_path = path_buffer.items; - - // std.log.debug("- {s}", .{path_buffer.items}); - - switch (entry.kind) { - .file => { - var file = try entry.dir.openFile(entry.basename, .{}); - defer file.close(); - - try addFile(file, fs_path); - }, - - .directory => { - try recursiveMkDir(fs_path); - }, - - else => { - var realpath_buffer: 
[std.fs.max_path_bytes]u8 = undefined; - std.log.warn("cannot copy file {!s}: {s} is not a supported file type!", .{ - entry.dir.realpath(entry.path, &realpath_buffer), - @tagName(entry.kind), - }); - }, - } - } - }, - } - } - - return 0; - } - - fn recursiveMkDir(path: []const u8) !void { - var i: usize = 0; - - while (std.mem.indexOfScalarPos(u8, path, i, '/')) |index| { - try Context.mkdir(path[0..index]); - i = index + 1; - } - - try Context.mkdir(path); - } - - fn addFile(file: std.fs.File, fs_path: []const u8) !void { - if (std.fs.path.dirnamePosix(fs_path)) |dir| { - try recursiveMkDir(dir); - } - - try Context.mkfile(fs_path, file); - } - - fn normalize(src_path: []const u8) ![:0]const u8 { - var list = std.ArrayList([]const u8).init(allocator); - defer list.deinit(); - - var parts = std.mem.tokenizeAny(u8, src_path, "\\/"); - - while (parts.next()) |part| { - if (std.mem.eql(u8, part, ".")) { - // "cd same" is a no-op, we can remove it - continue; - } else if (std.mem.eql(u8, part, "..")) { - // "cd up" is basically just removing the last pushed part - _ = list.pop(); - } else { - // this is an actual "descend" - try list.append(part); - } - } - - return try std.mem.joinZ(allocator, "/", list.items); - } - }; -} - -const Command = enum { - format, - mount, - mkdir, - file, - dir, -}; - -pub const Block = [BlockDevice.block_size]u8; - -pub const BlockDevice = struct { - pub const block_size = 512; - - file: *std.fs.File, - base: u64, // byte base offset - count: u64, // num blocks - - pub fn write(bd: *BlockDevice, num: u64, block: Block) !void { - if (num >= bd.count) return error.InvalidBlock; - try bd.file.seekTo(bd.base + block_size * num); - try bd.file.writeAll(&block); - } - - pub fn read(bd: *BlockDevice, num: u64) !Block { - if (num >= bd.count) return error.InvalidBlock; - var block: Block = undefined; - try bd.file.seekTo(bd.base + block_size * num); - try bd.file.reader().readNoEof(&block); - return block; - } -}; - -fn mistake(comptime fmt: []const u8, args: anytype) u8 { - std.log.err(fmt, args); - return 1; -} From ccaa61bd44b0b4ac2c13c67958965bddbfca9a35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Thu, 13 Mar 2025 20:58:52 +0100 Subject: [PATCH 21/26] Adds support for --deps-file --- justfile | 1 + src/Parser.zig | 6 ++++-- src/dim.zig | 52 ++++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 55 insertions(+), 4 deletions(-) diff --git a/justfile b/justfile index 1e779d4..bf50db4 100644 --- a/justfile +++ b/justfile @@ -29,6 +29,7 @@ behaviour-tests: \ behaviour-test script: install @mkdir -p {{ join(out, parent_directory(script)) }} ./zig-out/bin/dim --output {{ join(out, without_extension(script) + ".img") }} --script "{{script}}" --size 33M + ./zig-out/bin/dim --output {{ join(out, without_extension(script) + ".img") }} --deps-file {{ join(out, without_extension(script) + ".d") }} --script "{{script}}" --size 33M # TODO(fqu): sfdisk --json .dim-out/tests/part/mbr/basic-single-part-unsized.img diff --git a/src/Parser.zig b/src/Parser.zig index d4c35bc..0e469b5 100644 --- a/src/Parser.zig +++ b/src/Parser.zig @@ -9,6 +9,7 @@ const Parser = @This(); pub const Error = Tokenizer.Error || error{ FileNotFound, + InvalidPath, UnknownVariable, IoError, BadDirective, @@ -19,10 +20,10 @@ pub const Error = Tokenizer.Error || error{ }; pub const IO = struct { - fetch_file_fn: *const fn (io: *const IO, std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8, + 
fetch_file_fn: *const fn (io: *const IO, std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory, InvalidPath }![]const u8, resolve_variable_fn: *const fn (io: *const IO, name: []const u8) error{UnknownVariable}![]const u8, - pub fn fetch_file(io: *const IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8 { + pub fn fetch_file(io: *const IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory, InvalidPath }![]const u8 { return io.fetch_file_fn(io, allocator, path); } @@ -402,6 +403,7 @@ fn fuzz_parser(_: void, input: []const u8) !void { error.BadDirective, error.FileNotFound, error.ExpectedIncludePath, + error.InvalidPath, => continue, error.MaxIncludeDepthReached, diff --git a/src/dim.zig b/src/dim.zig index dea5ec1..384dfcf 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -19,6 +19,7 @@ const Options = struct { size: DiskSize = DiskSize.empty, script: ?[]const u8 = null, @"import-env": bool = false, + @"deps-file": ?[]const u8 = null, }; const usage = @@ -41,6 +42,8 @@ const usage = const VariableMap = std.StringArrayHashMapUnmanaged([]const u8); +var global_deps_file: ?std.fs.File = null; + pub fn main() !u8 { var gpa_impl: std.heap.DebugAllocator(.{}) = .init; defer _ = gpa_impl.deinit(); @@ -95,6 +98,19 @@ pub fn main() !u8 { const script_source = try current_dir.readFileAlloc(gpa, script_path, max_script_size); defer gpa.free(script_source); + if (options.@"deps-file") |deps_file_path| { + global_deps_file = try std.fs.cwd().createFile(deps_file_path, .{}); + + try global_deps_file.?.writer().print( + \\{s}: {s} + , .{ + output_path, + script_path, + }); + } + defer if (global_deps_file) |deps_file| + deps_file.close(); + var mem_arena: std.heap.ArenaAllocator = .init(gpa); defer mem_arena.deinit(); @@ -143,9 +159,20 @@ pub fn main() !u8 { try root_content.render(&stream); } + if (global_deps_file) |deps_file| { + try deps_file.writeAll("\n"); + } + return 0; } +pub fn declare_file_dependency(path: []const u8) !void { + const deps_file = global_deps_file orelse return; + + try deps_file.writeAll(" \\\n "); + try deps_file.writeAll(path); +} + fn fatal(msg: []const u8) noreturn { std.debug.print("Error: {s}\n", .{msg}); std.debug.print("Usage: {s}", .{usage}); @@ -325,8 +352,12 @@ const Environment = struct { std.log.err("PARSE ERROR: " ++ fmt, params); } - fn fetch_file(io: *const Parser.IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8 { + fn fetch_file(io: *const Parser.IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory, InvalidPath }![]const u8 { const env: *const Environment = @fieldParentPtr("io", io); + + const name: FileName = .{ .root_dir = env.include_base, .rel_path = path }; + try name.declare_dependency(); + return env.include_base.readFileAlloc(allocator, path, max_script_size) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.FileNotFound => return error.FileNotFound, @@ -435,11 +466,14 @@ pub const FileName = struct { error.FileBusy, => return error.IoError, }; + + try name.declare_dependency(); + return .{ .file = file }; } pub fn open_dir(name: FileName) OpenError!std.fs.Dir { - return name.root_dir.openDir(name.rel_path, .{ .iterate = true }) catch |err| switch (err) { + const dir = name.root_dir.openDir(name.rel_path, .{ .iterate = true }) catch |err| switch (err) { error.FileNotFound => { var buffer: [std.fs.max_path_bytes]u8 
= undefined; std.log.err("failed to open \"{}/{}\": not found", .{ @@ -467,6 +501,20 @@ pub const FileName = struct { error.NotDir, => return error.IoError, }; + + try name.declare_dependency(); + + return dir; + } + + pub fn declare_dependency(name: FileName) OpenError!void { + var buffer: [std.fs.max_path_bytes]u8 = undefined; + + const realpath = name.root_dir.realpath( + name.rel_path, + &buffer, + ) catch @panic("failed to determine real path for dependency file!"); + declare_file_dependency(realpath) catch @panic("Failed to write to deps file!"); } pub const GetSizeError = error{ FileNotFound, InvalidPath, IoError }; From 5c254b14013a2256181d107326d00e99937d2172 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Thu, 13 Mar 2025 21:10:22 +0100 Subject: [PATCH 22/26] Improves error reporting a bit. --- justfile | 3 ++- src/dim.zig | 38 +++++++++++++++++++++++++++++-------- tests/compound/mbr-boot.dis | 18 ++++++++++++++++++ 3 files changed, 50 insertions(+), 9 deletions(-) create mode 100644 tests/compound/mbr-boot.dis diff --git a/justfile b/justfile index bf50db4..9177b91 100644 --- a/justfile +++ b/justfile @@ -24,7 +24,8 @@ behaviour-tests: \ (behaviour-test "tests/part/mbr/basic-single-part-sized.dis") \ (behaviour-test "tests/fs/fat12.dis") \ (behaviour-test "tests/fs/fat16.dis") \ - (behaviour-test "tests/fs/fat32.dis") + (behaviour-test "tests/fs/fat32.dis") \ + (behaviour-test "tests/compound/mbr-boot.dis") behaviour-test script: install @mkdir -p {{ join(out, parent_directory(script)) }} diff --git a/src/dim.zig b/src/dim.zig index 384dfcf..dba52da 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -228,10 +228,21 @@ pub const Context = struct { pub fn parse_enum(ctx: Context, comptime E: type) Environment.ParseError!E { if (@typeInfo(E) != .@"enum") @compileError("get_enum requires an enum type!"); - return std.meta.stringToEnum( + const tag_name = try ctx.parse_string(); + const converted = std.meta.stringToEnum( E, - try ctx.parse_string(), - ) orelse return error.InvalidEnumTag; + tag_name, + ); + if (converted) |ok| + return ok; + std.debug.print("detected invalid enum tag for {s}: \"{}\"\n", .{ @typeName(E), std.zig.fmtEscapes(tag_name) }); + std.debug.print("valid options are:\n", .{}); + + for (std.enums.values(E)) |val| { + std.debug.print("- '{s}'\n", .{@tagName(val)}); + } + + return error.InvalidEnumTag; } pub fn parse_integer(ctx: Context, comptime I: type, base: u8) Environment.ParseError!I { @@ -355,14 +366,25 @@ const Environment = struct { fn fetch_file(io: *const Parser.IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory, InvalidPath }![]const u8 { const env: *const Environment = @fieldParentPtr("io", io); - const name: FileName = .{ .root_dir = env.include_base, .rel_path = path }; - try name.declare_dependency(); - - return env.include_base.readFileAlloc(allocator, path, max_script_size) catch |err| switch (err) { + const contents = env.include_base.readFileAlloc(allocator, path, max_script_size) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, - error.FileNotFound => return error.FileNotFound, + error.FileNotFound => { + const ctx = Context{ .env = @constCast(env) }; + var buffer: [std.fs.max_path_bytes]u8 = undefined; + try ctx.report_nonfatal_error("failed to open file: \"{}/{}\"", .{ + std.zig.fmtEscapes(env.include_base.realpath(".", &buffer) catch return error.FileNotFound), + std.zig.fmtEscapes(path), + }); + return 
error.FileNotFound; + }, else => return error.IoError, }; + errdefer allocator.free(contents); + + const name: FileName = .{ .root_dir = env.include_base, .rel_path = path }; + try name.declare_dependency(); + + return contents; } fn resolve_var(io: *const Parser.IO, name: []const u8) error{UnknownVariable}![]const u8 { diff --git a/tests/compound/mbr-boot.dis b/tests/compound/mbr-boot.dis new file mode 100644 index 0000000..9475d1b --- /dev/null +++ b/tests/compound/mbr-boot.dis @@ -0,0 +1,18 @@ +mbr-part + bootloader empty + part # partition 1 + type fat16_lba + contains vfat fat16 + label "BOOT" + endfat + size 10M + endpart + part # partition 2 + type fat16_lba + contains vfat fat16 + label "OS" + !include "../../data/rootfs.dis" + endfat + endpart + ignore # partition 3 + ignore # partition 4 From d4d5516d884c1efd0be2b5680b6cb814906ceb0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Fri, 14 Mar 2025 13:40:15 +0100 Subject: [PATCH 23/26] Updates README. --- README.md | 113 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 105 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 8fbfcdc..9b4c109 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,10 @@ -# Disk Image Creator +# 💡 Dimmer - The Disk Imager -The Disk Image Creator is a tool that uses a simple textual description of a disk image to create actual images. +> *Realize bright ideas with less energy!* -This tool is incredibly valuable when implementing your own operating system or deployments. +Dimmer is a tool that uses a simple textual description of a disk image to create actual images. + +This tool is incredibly valuable when implementing your own operating system, embedded systems or other kinds of deployment. ## Example @@ -12,10 +14,6 @@ This tool is incredibly valuable when implementing your own operating system or ## Available Content Types -```plain - -``` - ### Empty Content (`empty`) This type of content does not change its range at all and keeps it empty. No bytes will be emitted. @@ -45,23 +43,122 @@ paste-file <path> ### MBR Partition Table (`mbr-part`) ```plain +mbr-part + [bootloader <content>] + [part <…> | ignore] # partition 1 + [part <…> | ignore] # partition 2 + [part <…> | ignore] # partition 3 + [part <…> | ignore] # partition 4 +``` +```plain +part + type <type-id> + [bootable] + [size <bytes>] + [offset <bytes>] + contains <content> +endpart ``` +If `bootloader <content>` is given, will copy the `<content>` into the boot block, setting the boot code. + +The `mbr-part` component will end after all 4 partitions are specified. + +- Each partition must specify the `<type-id>` (see table below) to mark the partition type as well as `contains <content>` which defines what's stored in the partition. +- If `bootable` is present, the partition is marked as bootable. +- `size <bytes>` is required for all but the last partition and defines the size in bytes. It can use disk-size specifiers. +- `offset <bytes>` is required for either all or no partition and defines the disk offset for the partitions. This can be used to explicitly place the partitions. 
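+
+For example, a minimal two-partition layout (closely modeled on the `tests/compound/mbr-boot.dis` behaviour test; the partition sizes and labels are purely illustrative) could look like this:
+
+```plain
+mbr-part
+  bootloader empty
+  part # partition 1
+    type fat16_lba
+    contains vfat fat16
+      label "BOOT"
+    endfat
+    size 10M
+  endpart
+  part # partition 2
+    type fat16_lba
+    contains vfat fat16
+      label "OS"
+    endfat
+  endpart
+  ignore # partition 3
+  ignore # partition 4
+```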
+
+#### Partition Types
+
+| Type         | ID   | Description                                                                                                                                        |
+| ------------ | ---- | -------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `empty`      | 0x00 | No content                                                                                                                                         |
+| `fat12`      | 0x01 | [FAT12](https://en.wikipedia.org/wiki/FAT12)                                                                                                       |
+| `ntfs`       | 0x07 | [NTFS](https://en.wikipedia.org/wiki/NTFS)                                                                                                         |
+| `fat32_chs`  | 0x0B | [FAT32](https://en.wikipedia.org/wiki/FAT32) with [CHS](https://en.wikipedia.org/wiki/Cylinder-head-sector) addressing                             |
+| `fat32_lba`  | 0x0C | [FAT32](https://en.wikipedia.org/wiki/FAT32) with [LBA](https://en.wikipedia.org/wiki/Logical_block_addressing) addressing                         |
+| `fat16_lba`  | 0x0E | [FAT16B](https://en.wikipedia.org/wiki/File_Allocation_Table#FAT16B) with [LBA](https://en.wikipedia.org/wiki/Logical_block_addressing) addressing |
+| `linux_swap` | 0x82 | [Linux swap space](https://en.wikipedia.org/wiki/Swap_space#Linux)                                                                                 |
+| `linux_fs`   | 0x83 | Any [Linux file system](https://en.wikipedia.org/wiki/File_system#Linux)                                                                           |
+| `linux_lvm`  | 0x8E | [Linux LVM](https://en.wikipedia.org/wiki/Logical_Volume_Manager_(Linux))                                                                          |
+
+A complete list can be [found on Wikipedia](https://en.wikipedia.org/wiki/Partition_type), but [we do not support that yet](https://github.com/zig-osdev/disk-image-step/issues/8).
+
 ### GPT Partition Table (`gpt-part`)
 
 ```plain
 
 ```
 
-### FAT File System (`fat`)
+### FAT File System (`vfat`)
 
 ```plain
+vfat <type>
+  [label <fs-label>]
+  [fats <fatcount>]
+  [root-size <count>]
+  [sector-align <align>]
+  [cluster-size <size>]
+  <fs-ops...>
+endfat
+```
+
+| Parameter    | Values                         | Description                                                                                                                                                                                                                                                                    |
+| ------------ | ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `<type>`     | `fat12`, `fat16`, `fat32`      | Selects the type of FAT filesystem that is created.                                                                                                                                                                                                                           |
+| `<fatcount>` | `one`, `two`                   | Number of file allocation tables; select `one` for a smaller volume or `two` for the safer, redundant layout.                                                                                                                                                                 |
+| `<fs-label>` | ASCII string <= 11 chars       | Display name of the volume.                                                                                                                                                                                                                                                   |
+| `<count>`    | integers <= 32768              | Number of entries in the root directory.                                                                                                                                                                                                                                      |
+| `<align>`    | power of two >= 1 and <= 32768 | Alignment of the volume data area (file allocation pool, usually the erase block boundary of flash memory media) in units of sectors. Must be a power of two between 1 and 32768 inclusive; if zero (the default) or an invalid value is given, the block size reported by the lower storage layer (`disk_ioctl`) is used. |
+| `<size>`     | powers of two                  | Size of the allocation unit (cluster) in bytes.                                                                                                                                                                                                                               |
+
+## Standard Filesystem Operations
+
+All `<path>` values are absolute unix-style paths, starting with a `/` and using `/` as the path separator.
+
+All operations create the parent directories if necessary.
+
+### Create Directory (`mkdir`)
+
+```plain
+mkdir <path>
 ```
 
+Creates a directory.
+
+### Create File (`create-file`)
+
+```plain
+create-file <path> <size> <content>
+```
+
+Creates a file in the file system with `<size>` bytes (the size can use disk-size specifiers) and embeds another `<content>` element.
+
+This can be used to construct special or nested files ad-hoc.
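+
+For instance, the following sketch pre-allocates a zero-filled one-megabyte file (the path and size are purely illustrative):
+
+```plain
+create-file /var/log/dimmer.log 1M fill 0x00
+```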
+
+### Copy File (`copy-file`)
+
+```plain
+copy-file <path> <host-path>
+```
+
+Copies a file from `<host-path>` (relative to the current file) into the filesystem at `<path>`.
+
+### Copy Directory (`copy-dir`)
+
+```plain
+copy-dir <path> <host-path>
+```
+
+Copies a directory from `<host-path>` (relative to the current file) *recursively* into the filesystem at `<path>`.
+
+This will include *all files* from `<host-path>`.
+
 ## Compiling
+
 - Install [Zig 0.14.0](https://ziglang.org/download/).
 - Invoke `zig build -Drelease` in the repository root.
 - Execute `./zig-out/bin/dim --help` to verify your compilation worked.

From 6036cd41aa4cc5b867e87bcda3e3974b847d4388 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net>
Date: Fri, 14 Mar 2025 14:41:19 +0100
Subject: [PATCH 24/26] Starts to implement build.zig interface

---
 build.zig                               |   5 +-
 build.zig.zon                           |   4 +-
 justfile                                |  11 +-
 src/BuildInterface.zig                  | 159 ++++++++++++++++++++++++
 tests/zig-build-interface/build.zig     | 126 +++++++++++++++++++
 tests/zig-build-interface/build.zig.zon |  13 ++
 6 files changed, 311 insertions(+), 7 deletions(-)
 create mode 100644 src/BuildInterface.zig
 create mode 100644 tests/zig-build-interface/build.zig
 create mode 100644 tests/zig-build-interface/build.zig.zon

diff --git a/build.zig b/build.zig
index ce5a25b..3962284 100644
--- a/build.zig
+++ b/build.zig
@@ -1,5 +1,6 @@
 const std = @import("std");
-const builtin = @import("builtin");
+
+pub const BuildInterface = @import("src/BuildInterface.zig");
 
 pub fn build(b: *std.Build) void {
     const target = b.standardTargetOptions(.{});
@@ -34,7 +35,7 @@ pub fn build(b: *std.Build) void {
     dim_mod.addImport("zfat", zfat_mod);
 
     const dim_exe = b.addExecutable(.{
-        .name = "dim",
+        .name = "dimmer",
         .root_module = dim_mod,
     });
     b.installArtifact(dim_exe);
diff --git a/build.zig.zon b/build.zig.zon
index bb78edd..3ceb020 100644
--- a/build.zig.zon
+++ b/build.zig.zon
@@ -1,7 +1,7 @@
 .{
-    .name = .disk_image_step,
+    .name = .dimmer,
     .version = "2.0.0",
-    .fingerprint = 0xdaabde74a06664f7,
+    .fingerprint = 0x9947018c924eecb2,
     .dependencies = .{
         .zfat = .{
             .url = "https://github.com/ZigEmbeddedGroup/zfat/archive/3ce06d43a4e04d387034dcae2f486b050701f321.tar.gz",
diff --git a/justfile b/justfile
index 9177b91..6819811 100644
--- a/justfile
+++ b/justfile
@@ -8,7 +8,7 @@ default: install test
 install:
     {{zig}} build install
 
-test: unit-test behaviour-tests
+test: unit-test behaviour-tests build-test
 
 unit-test:
     {{zig}} build test
@@ -29,10 +29,15 @@ behaviour-tests: \
 
 behaviour-test script: install
     @mkdir -p {{ join(out, parent_directory(script)) }}
-    ./zig-out/bin/dim --output {{ join(out, without_extension(script) + ".img") }} --script "{{script}}" --size 33M
-    ./zig-out/bin/dim --output {{ join(out, without_extension(script) + ".img") }} --deps-file {{ join(out, without_extension(script) + ".d") }} --script "{{script}}" --size 33M
+    ./zig-out/bin/dimmer --output {{ join(out, without_extension(script) + ".img") }} --script "{{script}}" --size 33M
+    ./zig-out/bin/dimmer --output {{ join(out, without_extension(script) + ".img") }} --deps-file {{ join(out, without_extension(script) + ".d") }} --script "{{script}}" --size 33M
 
 # TODO(fqu): sfdisk --json .dim-out/tests/part/mbr/basic-single-part-unsized.img
+
+[working-directory: 'tests/zig-build-interface']
+build-test:
+    {{zig}} build
+
 fuzz:
     {{zig}} build install test --fuzz --port 35991

diff --git a/src/BuildInterface.zig b/src/BuildInterface.zig
new file mode
100644 index 0000000..815b585 --- /dev/null +++ b/src/BuildInterface.zig @@ -0,0 +1,159 @@ +//! +//! This file implements the Zig build system interface for Dimmer. +//! +//! It is included by it's build.zig +//! +const std = @import("std"); + +const Interface = @This(); + +builder: *std.Build, +dimmer_exe: *std.Build.Step.Compile, + +pub fn init(builder: *std.Build, dep: *std.Build.Dependency) Interface { + return .{ + .builder = builder, + .dimmer_exe = dep.artifact("dimmer"), + }; +} + +pub fn createDisk(dimmer: Interface, size: u64, content: Content) std.Build.LazyPath { + const b = dimmer.builder; + + const write_files = b.addWriteFiles(); + + const script_source, const variables = renderContent(write_files, b.allocator, content); + + const script_file = write_files.add("image.dis", script_source); + + const compile_script = b.addRunArtifact(dimmer.dimmer_exe); + + _ = compile_script.addPrefixedDepFileOutputArg("--deps-file=", "image.d"); + + compile_script.addArg(b.fmt("--size={d}", .{size})); + + compile_script.addPrefixedFileArg("--script=", script_file); + + const result_file = compile_script.addPrefixedOutputFileArg("--output=", "disk.img"); + + { + var iter = variables.iterator(); + while (iter.next()) |kvp| { + const key = kvp.key_ptr.*; + const value = kvp.value_ptr.*; + + compile_script.addPrefixedFileArg( + b.fmt("{s}=", .{key}), + value, + ); + } + } + + return result_file; +} + +fn renderContent(wfs: *std.Build.Step.WriteFile, allocator: std.mem.Allocator, content: Content) struct { []const u8, std.StringHashMap(std.Build.LazyPath) } { + var code: std.ArrayList(u8) = .init(allocator); + defer code.deinit(); + + var variables: std.StringHashMap(std.Build.LazyPath) = .init(allocator); + + renderContentInner( + wfs, + code.writer(), + &variables, + content, + ) catch @panic("out of memory"); + + const source = std.mem.trim( + u8, + code.toOwnedSlice() catch @panic("out of memory"), + " \r\n\t", + ); + + return .{ source, variables }; +} + +fn renderContentInner( + wfs: *std.Build.Step.WriteFile, + code: std.ArrayList(u8).Writer, + vars: *std.StringHashMap(std.Build.LazyPath), + content: Content, +) !void { + // Always insert some padding before and after: + try code.writeAll(" "); + errdefer code.writeAll(" ") catch {}; + + switch (content) { + .empty => { + try code.writeAll("empty"); + }, + + .fill => |data| { + try code.print("fill 0x{X:0>2}", .{data}); + }, + + .paste_file => |data| { + try code.writeAll("paste-file "); + try renderLazyPath(wfs, code, vars, data); + }, + + .mbr_part_table => |data| { + _ = data; + @panic("not supported yet!"); + }, + .vfat => |data| { + _ = data; + @panic("not supported yet!"); + }, + } +} + +fn renderLazyPath( + wfs: *std.Build.Step.WriteFile, + code: std.ArrayList(u8).Writer, + vars: *std.StringHashMap(std.Build.LazyPath), + path: std.Build.LazyPath, +) !void { + switch (path) { + .cwd_relative, + .dependency, + .src_path, + => { + // We can safely call getPath2 as we can fully resolve the path + // already + const full_path = path.getPath2(wfs.step.owner, &wfs.step); + + std.debug.assert(std.fs.path.isAbsolute(full_path)); + + try code.writeAll(full_path); + }, + + .generated => { + // this means we can't emit the variable just verbatim, but we + // actually have a build-time dependency + const var_id = vars.count() + 1; + const var_name = wfs.step.owner.fmt("PATH{}", .{var_id}); + + try vars.put(var_name, path); + + try code.print("${s}", .{var_name}); + }, + } +} + +pub const Content = union(enum) { + empty, + fill: u8, + paste_file: 
std.Build.LazyPath, + mbr_part_table: MbrPartTable, + vfat: FatFs, +}; + +pub const MbrPartTable = struct { + // +}; + +pub const FatFs = struct { + // +}; diff --git a/tests/zig-build-interface/build.zig b/tests/zig-build-interface/build.zig new file mode 100644 index 0000000..3898aad --- /dev/null +++ b/tests/zig-build-interface/build.zig @@ -0,0 +1,126 @@ +const std = @import("std"); +const Dimmer = @import("dimmer").BuildInterface; + +pub const KiB = 1024; +pub const MiB = 1024 * KiB; +pub const GiB = 1024 * MiB; + +pub fn build(b: *std.Build) void { + const dimmer_dep = b.dependency("dimmer", .{}); + + const dimmer: Dimmer = .init(b, dimmer_dep); + + const install_step = b.getInstallStep(); + + installDebugDisk(dimmer, install_step, "empty.img", 50 * KiB, .empty); + installDebugDisk(dimmer, install_step, "fill-0x00.img", 50 * KiB, .{ .fill = 0x00 }); + installDebugDisk(dimmer, install_step, "fill-0xAA.img", 50 * KiB, .{ .fill = 0xAA }); + installDebugDisk(dimmer, install_step, "fill-0xFF.img", 50 * KiB, .{ .fill = 0xFF }); + installDebugDisk(dimmer, install_step, "paste-file.img", 50 * KiB, .{ .paste_file = b.path("build.zig.zon") }); + + // installDebugDisk(dimmer, install_step, "empty-mbr.img", 50 * MiB, .{ + // .mbr_part_table = .{ + // .partitions = .{ + // null, + // null, + // null, + // null, + // }, + // }, + // }); + + // installDebugDisk(dimmer, install_step, "manual-offset-mbr.img", 50 * MiB, .{ + // .mbr_part_table = .{ + // .partitions = .{ + // &.{ .offset = 2048 + 0 * 10 * MiB, .size = 10 * MiB, .bootable = true, .type = .fat32_lba, .data = .empty }, + // &.{ .offset = 2048 + 1 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .ntfs, .data = .empty }, + // &.{ .offset = 2048 + 2 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .linux_swap, .data = .empty }, + // &.{ .offset = 2048 + 3 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .linux_fs, .data = .empty }, + // }, + // }, + // }); + + // installDebugDisk(dimmer, install_step, "auto-offset-mbr.img", 50 * MiB, .{ + // .mbr_part_table = .{ + // .partitions = .{ + // &.{ .size = 7 * MiB, .bootable = true, .type = .fat32_lba, .data = .empty }, + // &.{ .size = 8 * MiB, .bootable = false, .type = .ntfs, .data = .empty }, + // &.{ .size = 9 * MiB, .bootable = false, .type = .linux_swap, .data = .empty }, + // &.{ .size = 10 * MiB, .bootable = false, .type = .linux_fs, .data = .empty }, + // }, + // }, + // }); + + // installDebugDisk(dimmer, install_step, "empty-fat32.img", 50 * MiB, .{ + // .vfat = .{ + // .format = .fat32, + // .label = "EMPTY", + // .items = &.{}, + // }, + // }); + + // installDebugDisk(dimmer, install_step, "initialized-fat32.img", 50 * MiB, .{ + // .vfat = .{ + // .format = .fat32, + // .label = "ROOTFS", + // .items = &.{ + // .{ .empty_dir = "boot/EFI/refind/icons" }, + // .{ .empty_dir = "/boot/EFI/nixos/.extra-files/" }, + // .{ .empty_dir = "Users/xq/" }, + // .{ .copy_dir = .{ .source = b.path("dummy/Windows"), .destination = "Windows" } }, + // .{ .copy_file = .{ .source = b.path("dummy/README.md"), .destination = "Users/xq/README.md" } }, + // }, + // }, + // }); + + // installDebugDisk(dimmer, install_step, "initialized-fat32-in-mbr-partitions.img", 100 * MiB, .{ + // .mbr = .{ + // .partitions = .{ + // &.{ + // .size = 90 * MiB, + // .bootable = true, + // .type = .fat32_lba, + // .data = .{ + // .vfat = .{ + // .format = .fat32, + // .label = "ROOTFS", + // .items = &.{ + // .{ .empty_dir = "boot/EFI/refind/icons" }, + // .{ .empty_dir = 
"/boot/EFI/nixos/.extra-files/" }, + // .{ .empty_dir = "Users/xq/" }, + // .{ .copy_dir = .{ .source = b.path("dummy/Windows"), .destination = "Windows" } }, + // .{ .copy_file = .{ .source = b.path("dummy/README.md"), .destination = "Users/xq/README.md" } }, + // }, + // }, + // }, + // }, + // null, + // null, + // null, + // }, + // }, + // }); + + // TODO: Implement GPT partition support + // installDebugDisk(debug_step, "empty-gpt.img", 50 * MiB, .{ + // .gpt = .{ + // .partitions = &.{}, + // }, + // }); +} + +fn installDebugDisk( + dimmer: Dimmer, + install_step: *std.Build.Step, + name: []const u8, + size: u64, + content: Dimmer.Content, +) void { + const disk_file = dimmer.createDisk(size, content); + + const install_disk = install_step.owner.addInstallFile( + disk_file, + name, + ); + install_step.dependOn(&install_disk.step); +} diff --git a/tests/zig-build-interface/build.zig.zon b/tests/zig-build-interface/build.zig.zon new file mode 100644 index 0000000..1f41b7b --- /dev/null +++ b/tests/zig-build-interface/build.zig.zon @@ -0,0 +1,13 @@ +.{ + .name = .dimmer_usage_demo, + .fingerprint = 0x6a630b1cfa8384c, + .version = "1.0.0", + .dependencies = .{ + .dimmer = .{ + .path = "../..", + }, + }, + .paths = .{ + ".", + }, +} From 2409760d9c11469f2cfe8acf9d72984e345e18ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Fri, 14 Mar 2025 14:58:14 +0100 Subject: [PATCH 25/26] Makes MBR partition types use name lookup + numeric option instead of fixed enum. --- README.md | 36 +++++++--- src/components/part/MbrPartitionTable.zig | 83 +++++++++-------------- tests/compound/mbr-boot.dis | 4 +- 3 files changed, 63 insertions(+), 60 deletions(-) diff --git a/README.md b/README.md index 9b4c109..cd908cf 100644 --- a/README.md +++ b/README.md @@ -8,8 +8,28 @@ This tool is incredibly valuable when implementing your own operating system, em ## Example -```plain - +```rb +mbr-part + bootloader paste-file "./syslinux.bin" + part # partition 1 + type fat16-lba + size 25M + contains vfat fat16 + label "BOOT" + copy-dir "/syslinux" "./bootfs/syslinux" + endfat + endpart + part # partition 2 + type fat32-lba + contains vfat fat32 + label "OS" + mkdir "/home/dimmer" + copy-file "/home/dimmer/.config/dimmer.cfg" "./dimmer.cfg" + !include "./rootfs/files.dis" + endfat + endpart + ignore # partition 3 + ignore # partition 4 ``` ## Available Content Types @@ -77,12 +97,12 @@ The `mbr-part` component will end after all 4 partitions are specified. 
| `empty` | 0x00 | No content | | `fat12` | 0x01 | [FAT12](https://en.wikipedia.org/wiki/FAT12) | | `ntfs` | 0x07 | [NTFS](https://en.wikipedia.org/wiki/NTFS) | -| `fat32_chs` | 0x0B | [FAT32](https://en.wikipedia.org/wiki/FAT32) with [CHS](https://en.wikipedia.org/wiki/Cylinder-head-sector) addressing | -| `fat32_lba` | 0x0C | [FAT32](https://en.wikipedia.org/wiki/FAT32) with [LBA](https://en.wikipedia.org/wiki/Logical_block_addressing) addressing | -| `fat16_lba` | 0x0E | [FAT16B](https://en.wikipedia.org/wiki/File_Allocation_Table#FAT16B) with [LBA](https://en.wikipedia.org/wiki/Logical_block_addressing) addressing | -| `linux_swap` | 0x82 | [Linux swap space](https://en.wikipedia.org/wiki/Swap_space#Linux) | -| `linux_fs` | 0x83 | Any [Linux file system](https://en.wikipedia.org/wiki/File_system#Linux) | -| `linux_lvm` | 0x8E | [Linux LVM](https://en.wikipedia.org/wiki/Logical_Volume_Manager_(Linux)) | +| `fat32-chs` | 0x0B | [FAT32](https://en.wikipedia.org/wiki/FAT32) with [CHS](https://en.wikipedia.org/wiki/Cylinder-head-sector) addressing | +| `fat32-lba` | 0x0C | [FAT32](https://en.wikipedia.org/wiki/FAT32) with [LBA](https://en.wikipedia.org/wiki/Logical_block_addressing) addressing | +| `fat16-lba` | 0x0E | [FAT16B](https://en.wikipedia.org/wiki/File_Allocation_Table#FAT16B) with [LBA](https://en.wikipedia.org/wiki/Logical_block_addressing) addressing | +| `linux-swap` | 0x82 | [Linux swap space](https://en.wikipedia.org/wiki/Swap_space#Linux) | +| `linux-fs` | 0x83 | Any [Linux file system](https://en.wikipedia.org/wiki/File_system#Linux) | +| `linux-lvm` | 0x8E | [Linux LVM](https://en.wikipedia.org/wiki/Logical_Volume_Manager_(Linux)) | A complete list can be [found on Wikipedia](https://en.wikipedia.org/wiki/Partition_type), but [we do not support that yet](https://github.com/zig-osdev/disk-image-step/issues/8). diff --git a/src/components/part/MbrPartitionTable.zig b/src/components/part/MbrPartitionTable.zig index 568afa4..16589d4 100644 --- a/src/components/part/MbrPartitionTable.zig +++ b/src/components/part/MbrPartitionTable.zig @@ -88,7 +88,7 @@ fn parse_partition(ctx: dim.Context) !Partition { .offset = null, .size = null, .bootable = false, - .type = .empty, + .type = 0x00, .contains = .empty, }; @@ -108,7 +108,19 @@ fn parse_partition(ctx: dim.Context) !Partition { endpart, }); try switch (kw) { - .type => updater.set(.type, try ctx.parse_enum(PartitionType)), + .type => { + const part_name = try ctx.parse_string(); + + const encoded = if (std.fmt.parseInt(u8, part_name, 0)) |value| + value + else |_| + known_partition_types.get(part_name) orelse blk: { + try ctx.report_nonfatal_error("unknown partition type '{}'", .{std.zig.fmtEscapes(part_name)}); + break :blk 0x00; + }; + + try updater.set(.type, encoded); + }, .bootable => updater.set(.bootable, true), .size => updater.set(.size, try ctx.parse_mem_size()), .offset => updater.set(.offset, try ctx.parse_mem_size()), @@ -212,7 +224,7 @@ fn render(table: *PartTable, stream: *dim.BinaryStream) dim.Content.RenderError! 
desc[0] = if (part.bootable) 0x80 else 0x00; desc[1..4].* = encodeMbrChsEntry(lba); // chs_start - desc[4] = @intFromEnum(part.type); + desc[4] = part.type; desc[5..8].* = encodeMbrChsEntry(lba + size - 1); // chs_end std.mem.writeInt(u32, desc[8..12], lba, .little); // lba_start std.mem.writeInt(u32, desc[12..16], size, .little); // block_count @@ -241,57 +253,28 @@ pub const Partition = struct { size: ?u64, bootable: bool, - type: PartitionType, + type: u8, contains: dim.Content, }; -/// https://en.wikipedia.org/wiki/Partition_type -pub const PartitionType = enum(u8) { - empty = 0x00, - - fat12 = 0x01, - ntfs = 0x07, - - fat32_chs = 0x0B, - fat32_lba = 0x0C, - - fat16_lba = 0x0E, - - linux_swap = 0x82, - linux_fs = 0x83, - linux_lvm = 0x8E, - - // Output from fdisk (util-linux 2.38.1) - // 00 Leer 27 Verst. NTFS Win 82 Linux Swap / So c1 DRDOS/sec (FAT- - // 01 FAT12 39 Plan 9 83 Linux c4 DRDOS/sec (FAT- - // 02 XENIX root 3c PartitionMagic 84 versteckte OS/2 c6 DRDOS/sec (FAT- - // 03 XENIX usr 40 Venix 80286 85 Linux erweitert c7 Syrinx - // 04 FAT16 <32M 41 PPC PReP Boot 86 NTFS Datenträge da Keine Dateisyst - // 05 Erweiterte 42 SFS 87 NTFS Datenträge db CP/M / CTOS / . - // 06 FAT16 4d QNX4.x 88 Linux Klartext de Dell Dienstprog - // 07 HPFS/NTFS/exFAT 4e QNX4.x 2. Teil 8e Linux LVM df BootIt - // 08 AIX 4f QNX4.x 3. Teil 93 Amoeba e1 DOS-Zugriff - // 09 AIX bootfähig 50 OnTrack DM 94 Amoeba BBT e3 DOS R/O - // 0a OS/2-Bootmanage 51 OnTrack DM6 Aux 9f BSD/OS e4 SpeedStor - // 0b W95 FAT32 52 CP/M a0 IBM Thinkpad Ru ea Linux erweitert - // 0c W95 FAT32 (LBA) 53 OnTrack DM6 Aux a5 FreeBSD eb BeOS Dateisyste - // 0e W95 FAT16 (LBA) 54 OnTrackDM6 a6 OpenBSD ee GPT - // 0f W95 Erw. (LBA) 55 EZ-Drive a7 NeXTSTEP ef EFI (FAT-12/16/ - // 10 OPUS 56 Golden Bow a8 Darwin UFS f0 Linux/PA-RISC B - // 11 Verst. FAT12 5c Priam Edisk a9 NetBSD f1 SpeedStor - // 12 Compaq Diagnost 61 SpeedStor ab Darwin Boot f4 SpeedStor - // 14 Verst. FAT16 <3 63 GNU HURD oder S af HFS / HFS+ f2 DOS sekundär - // 16 Verst. FAT16 64 Novell Netware b7 BSDi Dateisyste f8 EBBR geschützt - // 17 Verst. HPFS/NTF 65 Novell Netware b8 BSDI Swap fb VMware VMFS - // 18 AST SmartSleep 70 DiskSecure Mult bb Boot-Assistent fc VMware VMKCORE - // 1b Verst. W95 FAT3 75 PC/IX bc Acronis FAT32 L fd Linux RAID-Auto - // 1c Verst. W95 FAT3 80 Altes Minix be Solaris Boot fe LANstep - // 1e Verst. 
W95 FAT1 81 Minix / altes L bf Solaris ff BBT - // 24 NEC DOS - - _, -}; +// TODO: Fill from https://en.wikipedia.org/wiki/Partition_type +const known_partition_types = std.StaticStringMap(u8).initComptime(.{ + .{ "empty", 0x00 }, + + .{ "fat12", 0x01 }, + + .{ "ntfs", 0x07 }, + + .{ "fat32-chs", 0x0B }, + .{ "fat32-lba", 0x0C }, + + .{ "fat16-lba", 0x0E }, + + .{ "linux-swap", 0x82 }, + .{ "linux-fs", 0x83 }, + .{ "linux-lvm", 0x8E }, +}); pub fn encodeMbrChsEntry(lba: u32) [3]u8 { var chs = lbaToChs(lba); diff --git a/tests/compound/mbr-boot.dis b/tests/compound/mbr-boot.dis index 9475d1b..6bd6b7f 100644 --- a/tests/compound/mbr-boot.dis +++ b/tests/compound/mbr-boot.dis @@ -1,14 +1,14 @@ mbr-part bootloader empty part # partition 1 - type fat16_lba + type fat16-lba contains vfat fat16 label "BOOT" endfat size 10M endpart part # partition 2 - type fat16_lba + type fat16-lba contains vfat fat16 label "OS" !include "../../data/rootfs.dis" From 06660a8b769ee550415b5b55d5fc2f26cf7980ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20=22xq=22=20Quei=C3=9Fner?= <git@random-projects.net> Date: Sun, 23 Mar 2025 20:42:00 +0100 Subject: [PATCH 26/26] Implements enough of the build system interface to enable Ashet OS to build again. --- src/BuildInterface.zig | 383 +++++++++++++++++++++++++++++++++-------- src/dim.zig | 18 +- 2 files changed, 326 insertions(+), 75 deletions(-) diff --git a/src/BuildInterface.zig b/src/BuildInterface.zig index 815b585..f6d1509 100644 --- a/src/BuildInterface.zig +++ b/src/BuildInterface.zig @@ -7,6 +7,10 @@ const std = @import("std"); const Interface = @This(); +pub const kiB = 1024; +pub const MiB = 1024 * 1024; +pub const GiB = 1024 * 1024 * 1024; + builder: *std.Build, dimmer_exe: *std.Build.Step.Compile, @@ -40,30 +44,37 @@ pub fn createDisk(dimmer: Interface, size: u64, content: Content) std.Build.Lazy var iter = variables.iterator(); while (iter.next()) |kvp| { const key = kvp.key_ptr.*; - const value = kvp.value_ptr.*; - - compile_script.addPrefixedFileArg( - b.fmt("{s}=", .{key}), - value, - ); + const path, const usage = kvp.value_ptr.*; + + switch (usage) { + .file => compile_script.addPrefixedFileArg( + b.fmt("{s}=", .{key}), + path, + ), + .directory => compile_script.addPrefixedDirectoryArg( + b.fmt("{s}=", .{key}), + path, + ), + } } } return result_file; } -fn renderContent(wfs: *std.Build.Step.WriteFile, allocator: std.mem.Allocator, content: Content) struct { []const u8, std.StringHashMap(std.Build.LazyPath) } { +fn renderContent(wfs: *std.Build.Step.WriteFile, allocator: std.mem.Allocator, content: Content) struct { []const u8, ContentWriter.VariableMap } { var code: std.ArrayList(u8) = .init(allocator); defer code.deinit(); - var variables: std.StringHashMap(std.Build.LazyPath) = .init(allocator); + var variables: ContentWriter.VariableMap = .init(allocator); + + var cw: ContentWriter = .{ + .code = code.writer(), + .wfs = wfs, + .vars = &variables, + }; - renderContentInner( - wfs, - code.writer(), - &variables, - content, - ) catch @panic("out of memory"); + cw.render(content) catch @panic("out of memory"); const source = std.mem.trim( u8, @@ -71,76 +82,201 @@ fn renderContent(wfs: *std.Build.Step.WriteFile, allocator: std.mem.Allocator, c " \r\n\t", ); + variables.sort(struct { + map: *ContentWriter.VariableMap, + + pub fn lessThan(ctx: @This(), lhs: usize, rhs: usize) bool { + return std.mem.lessThan(u8, ctx.map.keys()[lhs], ctx.map.keys()[rhs]); + } + }{ + .map = &variables, + }); + return .{ source, variables }; } -fn renderContentInner( 
+const ContentWriter = struct { + pub const VariableMap = std.StringArrayHashMap(struct { std.Build.LazyPath, ContentWriter.UsageHint }); + wfs: *std.Build.Step.WriteFile, code: std.ArrayList(u8).Writer, - vars: *std.StringHashMap(std.Build.LazyPath), - content: Content, -) !void { - // Always insert some padding before and after: - try code.writeAll(" "); - errdefer code.writeAll(" ") catch {}; - - switch (content) { - .empty => { - try code.writeAll("empty"); - }, + vars: *VariableMap, + + fn render(cw: ContentWriter, content: Content) !void { + // Always insert some padding before and after: + try cw.code.writeAll(" "); + errdefer cw.code.writeAll(" ") catch {}; + + switch (content) { + .empty => { + try cw.code.writeAll("empty"); + }, + + .fill => |data| { + try cw.code.print("fill 0x{X:0>2}", .{data}); + }, + + .paste_file => |data| { + try cw.code.print("paste-file {}", .{cw.fmtLazyPath(data, .file)}); + }, + + .mbr_part_table => |data| { + try cw.code.writeAll("mbr-part\n"); + + if (data.bootloader) |loader| { + try cw.code.writeAll(" bootloader "); + try cw.render(loader.*); + try cw.code.writeAll("\n"); + } + + for (data.partitions) |mpart| { + if (mpart) |part| { + try cw.code.writeAll(" part\n"); + if (part.bootable) { + try cw.code.print(" type {s}\n", .{@tagName(part.type)}); + try cw.code.writeAll(" bootable\n"); + if (part.offset) |offset| { + try cw.code.print(" offset {d}\n", .{offset}); + } + if (part.size) |size| { + try cw.code.print(" size {d}\n", .{size}); + } + try cw.code.writeAll(" contains"); + try cw.render(part.data); + try cw.code.writeAll("\n"); + } + try cw.code.writeAll(" endpart\n"); + } else { + try cw.code.writeAll(" ignore\n"); + } + } + }, + + .vfat => |data| { + try cw.code.print("vfat {s}\n", .{ + @tagName(data.format), + }); + if (data.label) |label| { + try cw.code.print(" label \"{}\"\n", .{ + fmtPath(label), + }); + } + + try cw.renderFileSystemTree(data.tree); + + try cw.code.writeAll("endfat\n"); + }, + } + } - .fill => |data| { - try code.print("fill 0x{X:0>2}", .{data}); - }, + fn renderFileSystemTree(cw: ContentWriter, fs: FileSystem) !void { + for (fs.items) |item| { + switch (item) { + .empty_dir => |dir| try cw.code.print("mkdir \"{}\"\n", .{ + fmtPath(dir), + }), + + .copy_dir => |copy| try cw.code.print("copy-dir \"{}\" {}\n", .{ + fmtPath(copy.destination), + cw.fmtLazyPath(copy.source, .directory), + }), + + .copy_file => |copy| try cw.code.print("copy-file \"{}\" {}\n", .{ + fmtPath(copy.destination), + cw.fmtLazyPath(copy.source, .file), + }), + + .include_script => |script| try cw.code.print("!include {}\n", .{ + cw.fmtLazyPath(script, .file), + }), + } + } + } - .paste_file => |data| { - try code.writeAll("paste-file "); - try renderLazyPath(wfs, code, vars, data); - }, + const PathFormatter = std.fmt.Formatter(formatPath); + const LazyPathFormatter = std.fmt.Formatter(formatLazyPath); + const UsageHint = enum { file, directory }; - .mbr_part_table => |data| { - _ = data; - @panic("not supported yet!"); - }, - .vfat => |data| { - _ = data; - @panic("not supported yet!"); - }, + fn fmtLazyPath(cw: ContentWriter, path: std.Build.LazyPath, hint: UsageHint) LazyPathFormatter { + return .{ .data = .{ cw, path, hint } }; } -} - -fn renderLazyPath( - wfs: *std.Build.Step.WriteFile, - code: std.ArrayList(u8).Writer, - vars: *std.StringHashMap(std.Build.LazyPath), - path: std.Build.LazyPath, -) !void { - switch (path) { - .cwd_relative, - .dependency, - .src_path, - => { - // We can safely call getPath2 as we can fully resolve the path - // 
already - const full_path = path.getPath2(wfs.step.owner, &wfs.step); - - std.debug.assert(std.fs.path.isAbsolute(full_path)); - - try code.writeAll(full_path); - }, - .generated => { - // this means we can't emit the variable just verbatim, but we - // actually have a build-time dependency - const var_id = vars.count() + 1; - const var_name = wfs.step.owner.fmt("PATH{}", .{var_id}); + fn fmtPath(path: []const u8) PathFormatter { + return .{ .data = path }; + } - try vars.put(var_name, path); + fn formatLazyPath( + data: struct { ContentWriter, std.Build.LazyPath, UsageHint }, + comptime fmt: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, + ) !void { + const cw, const path, const hint = data; + _ = fmt; + _ = options; + + switch (path) { + .cwd_relative, + .dependency, + .src_path, + => { + // We can safely call getPath2 as we can fully resolve the path + // already + const full_path = path.getPath2(cw.wfs.step.owner, &cw.wfs.step); + + std.debug.assert(std.fs.path.isAbsolute(full_path)); + + try writer.print("\"{}\"", .{ + fmtPath(full_path), + }); + }, + + .generated => { + // this means we can't emit the variable just verbatim, but we + // actually have a build-time dependency + const var_id = cw.vars.count() + 1; + const var_name = cw.wfs.step.owner.fmt("PATH{}", .{var_id}); + + try cw.vars.put(var_name, .{ path, hint }); + + try writer.print("${s}", .{var_name}); + }, + } + } - try code.print("${s}", .{var_name}); - }, + fn formatPath( + path: []const u8, + comptime fmt: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, + ) !void { + _ = fmt; + _ = options; + + const is_safe_word = for (path) |char| { + switch (char) { + 'A'...'Z', + 'a'...'z', + '0'...'9', + '_', + '-', + '/', + '.', + ':', + => {}, + else => break false, + } + } else true; + + if (is_safe_word) { + try writer.writeAll(path); + } else { + try writer.print("\"{}\"", .{ + std.fmt.fmtSliceEscapeLower(path), + }); + } } -} +}; pub const Content = union(enum) { empty, @@ -151,9 +287,110 @@ pub const Content = union(enum) { }; pub const MbrPartTable = struct { - // + bootloader: ?*const Content = null, + partitions: [4]?*const Partition, + + pub const Partition = struct { + type: enum { + empty, + fat12, + ntfs, + @"fat32-chs", + @"fat32-lba", + @"fat16-lba", + @"linux-swa", + @"linux-fs", + @"linux-lvm", + }, + bootable: bool = false, + size: ?u64 = null, + offset: ?u64 = null, + data: Content, + }; }; pub const FatFs = struct { - // + format: enum { + fat12, + fat16, + fat32, + } = .fat32, + + label: ?[]const u8 = null, + + // TODO: fats <fatcount> + // TODO: root-size <count> + // TODO: sector-align <align> + // TODO: cluster-size <size> + + tree: FileSystem, +}; + +pub const FileSystemBuilder = struct { + b: *std.Build, + list: std.ArrayListUnmanaged(FileSystem.Item), + + pub fn init(b: *std.Build) FileSystemBuilder { + return FileSystemBuilder{ + .b = b, + .list = .{}, + }; + } + + pub fn finalize(fsb: *FileSystemBuilder) FileSystem { + return .{ + .items = fsb.list.toOwnedSlice(fsb.b.allocator) catch @panic("out of memory"), + }; + } + + pub fn includeScript(fsb: *FileSystemBuilder, source: std.Build.LazyPath) void { + fsb.list.append(fsb.b.allocator, .{ + .include_script = source.dupe(fsb.b), + }) catch @panic("out of memory"); + } + + pub fn copyFile(fsb: *FileSystemBuilder, source: std.Build.LazyPath, destination: []const u8) void { + fsb.list.append(fsb.b.allocator, .{ + .copy_file = .{ + .source = source.dupe(fsb.b), + .destination = fsb.b.dupe(destination), + }, + }) catch 
@panic("out of memory"); + } + + pub fn copyDirectory(fsb: *FileSystemBuilder, source: std.Build.LazyPath, destination: []const u8) void { + fsb.list.append(fsb.b.allocator, .{ + .copy_dir = .{ + .source = source.dupe(fsb.b), + .destination = fsb.b.dupe(destination), + }, + }) catch @panic("out of memory"); + } + + pub fn mkdir(fsb: *FileSystemBuilder, destination: []const u8) void { + fsb.list.append(fsb.b.allocator, .{ + .empty_dir = fsb.b.dupe(destination), + }) catch @panic("out of memory"); + } +}; + +pub const FileSystem = struct { + pub const Copy = struct { + source: std.Build.LazyPath, + destination: []const u8, + }; + + pub const Item = union(enum) { + empty_dir: []const u8, + copy_dir: Copy, + copy_file: Copy, + include_script: std.Build.LazyPath, + }; + + // format: Format, + // label: []const u8, + items: []const Item, + + // private: + // executable: ?std.Build.LazyPath = null, }; diff --git a/src/dim.zig b/src/dim.zig index dba52da..aa82666 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -2,11 +2,22 @@ //! Disk Imager Command Line //! const std = @import("std"); +const builtin = @import("builtin"); const Tokenizer = @import("Tokenizer.zig"); const Parser = @import("Parser.zig"); const args = @import("args"); +pub const std_options: std.Options = .{ + .log_level = if (builtin.mode == .Debug) + .debug + else + .info, + .log_scope_levels = &.{ + .{ .scope = .fatfs, .level = .info }, + }, +}; + comptime { // Ensure zfat is linked to prevent compiler errors! _ = @import("zfat"); @@ -169,8 +180,11 @@ pub fn main() !u8 { pub fn declare_file_dependency(path: []const u8) !void { const deps_file = global_deps_file orelse return; - try deps_file.writeAll(" \\\n "); - try deps_file.writeAll(path); + const stat = try std.fs.cwd().statFile(path); + if (stat.kind != .directory) { + try deps_file.writeAll(" \\\n "); + try deps_file.writeAll(path); + } } fn fatal(msg: []const u8) noreturn {