diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 663b39bac9b..0a6198fb34e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -135,6 +135,13 @@ jobs: retry_on: error max_attempts: 3 command: ./hack/test-example.sh examples/experimental/9p.yaml + - name: "Test disk.yaml" + uses: nick-invision/retry@v2 + with: + timeout_minutes: 30 + retry_on: error + max_attempts: 3 + command: ./hack/test-example.sh examples/disk.yaml # GHA macOS is slow and flaky, so we only test a few YAMLS here. # Other yamls are tested on Linux instances of Cirrus. diff --git a/README.md b/README.md index da968ef2362..c77686e6407 100644 --- a/README.md +++ b/README.md @@ -224,6 +224,14 @@ Use `:` to specify a source or target inside an instance. #### `limactl edit` `limactl edit `: edit the instance +#### `limactl disk` + +`limactl disk create --size `: create a new external disk to attach to an instance + +`limactl disk delete `: delete an existing disk + +`limactl disk list`: list all existing disks + #### `limactl completion` - To enable bash completion, add `source <(limactl completion bash)` to `~/.bash_profile`. diff --git a/cmd/limactl/disk.go b/cmd/limactl/disk.go new file mode 100644 index 00000000000..62a2cbc0680 --- /dev/null +++ b/cmd/limactl/disk.go @@ -0,0 +1,305 @@ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io/fs" + "os" + "text/tabwriter" + + "github.com/docker/go-units" + "github.com/lima-vm/lima/pkg/qemu" + "github.com/lima-vm/lima/pkg/store" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +func newDiskCommand() *cobra.Command { + var diskCommand = &cobra.Command{ + Use: "disk", + Short: "Lima disk management", + Example: ` Create a disk: + $ limactl disk create DISK --size SIZE + + List existing disks: + $ limactl disk ls + + Delete a disk: + $ limactl disk delete DISK`, + SilenceUsage: true, + SilenceErrors: true, + } + diskCommand.AddCommand( + newDiskCreateCommand(), + newDiskListCommand(), + newDiskDeleteCommand(), + newDiskUnlockCommand(), + ) + return diskCommand +} + +func newDiskCreateCommand() *cobra.Command { + var diskCreateCommand = &cobra.Command{ + Use: "create DISK", + Example: ` +To create a new disk: +$ limactl disk create DISK --size SIZE +`, + Short: "Create a Lima disk", + Args: cobra.ExactArgs(1), + RunE: diskCreateAction, + } + diskCreateCommand.Flags().String("size", "", "configure the disk size") + diskCreateCommand.MarkFlagRequired("size") + return diskCreateCommand +} + +func diskCreateAction(cmd *cobra.Command, args []string) error { + size, err := cmd.Flags().GetString("size") + if err != nil { + return err + } + + diskSize, err := units.RAMInBytes(size) + if err != nil { + return err + } + + // only exactly one arg is allowed + name := args[0] + + diskDir, err := store.DiskDir(name) + if err != nil { + return err + } + + if _, err := os.Stat(diskDir); !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("disk %q already exists (%q)", name, diskDir) + } + + logrus.Infof("Creating a disk %q", name) + + if err := os.MkdirAll(diskDir, 0700); err != nil { + return err + } + + if err := qemu.CreateDataDisk(diskDir, int(diskSize)); err != nil { + return err + } + + return nil +} + +func newDiskListCommand() *cobra.Command { + var diskListCommand = &cobra.Command{ + Use: "list", + Example: ` +To list existing disks: +$ limactl disk list +`, + Short: "List existing Lima disks", + Aliases: 
[]string{"ls"}, + Args: cobra.NoArgs, + RunE: diskListAction, + } + diskListCommand.Flags().Bool("json", false, "JSONify output") + return diskListCommand +} + +func diskListAction(cmd *cobra.Command, args []string) error { + jsonFormat, err := cmd.Flags().GetBool("json") + if err != nil { + return err + } + + allDisks, err := store.Disks() + if err != nil { + return err + } + + if jsonFormat { + for _, diskName := range allDisks { + disk, err := store.InspectDisk(diskName) + if err != nil { + logrus.WithError(err).Errorf("disk %q does not exist?", diskName) + continue + } + j, err := json.Marshal(disk) + if err != nil { + return err + } + fmt.Fprintln(cmd.OutOrStdout(), string(j)) + } + return nil + } + + w := tabwriter.NewWriter(cmd.OutOrStdout(), 4, 8, 4, ' ', 0) + fmt.Fprintln(w, "NAME\tSIZE\tDIR\tIN USE BY") + + if len(allDisks) == 0 { + logrus.Warn("No disk found. Run `limactl disk create DISK --size SIZE` to create a disk.") + } + + for _, diskName := range allDisks { + disk, err := store.InspectDisk(diskName) + if err != nil { + logrus.WithError(err).Errorf("disk %q does not exist?", diskName) + continue + } + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", disk.Name, units.BytesSize(float64(disk.Size)), disk.Dir, disk.Instance) + } + + return w.Flush() +} + +func newDiskDeleteCommand() *cobra.Command { + var diskDeleteCommand = &cobra.Command{ + Use: "delete DISK [DISK, ...]", + Example: ` +To delete a disk: +$ limactl disk delete DISK + +To delete multiple disks: +$ limactl disk delete DISK1 DISK2 ... +`, + Aliases: []string{"remove", "rm"}, + Short: "Delete one or more Lima disks", + Args: cobra.MinimumNArgs(1), + RunE: diskDeleteAction, + } + diskDeleteCommand.Flags().Bool("force", false, "force delete") + return diskDeleteCommand +} + +func diskDeleteAction(cmd *cobra.Command, args []string) error { + force, err := cmd.Flags().GetBool("force") + if err != nil { + return err + } + + for _, diskName := range args { + if force { + disk, err := store.InspectDisk(diskName) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + logrus.Warnf("Ignoring non-existent disk %q", diskName) + continue + } + return err + } + + if err := deleteDisk(disk); err != nil { + return fmt.Errorf("failed to delete disk %q: %w", diskName, err) + } + logrus.Infof("Deleted %q (%q)", diskName, disk.Dir) + continue + } + + disk, err := store.InspectDisk(diskName) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + logrus.Warnf("Ignoring non-existent disk %q", diskName) + continue + } + return err + } + if disk.Instance != "" { + return fmt.Errorf("cannot delete disk %q in use by instance %q", disk.Name, disk.Instance) + } + instances, err := store.Instances() + if err != nil { + return err + } + var refInstances []string + for _, instName := range instances { + inst, err := store.Inspect(instName) + if err != nil { + continue + } + if len(inst.AdditionalDisks) > 0 { + for _, d := range inst.AdditionalDisks { + if d == diskName { + refInstances = append(refInstances, instName) + } + } + } + } + if len(refInstances) > 0 { + logrus.Warnf("Skipping deleting disk %q, disk is referenced by one or more non-running instances: %q", + diskName, refInstances) + logrus.Warnf("To delete anyway, run %q", forceDeleteCommand(diskName)) + continue + } + if err := deleteDisk(disk); err != nil { + return fmt.Errorf("failed to delete disk %q: %v", diskName, err) + } + logrus.Infof("Deleted %q (%q)", diskName, disk.Dir) + } + return nil +} + +func deleteDisk(disk *store.Disk) error { + if err := os.RemoveAll(disk.Dir); err != nil 
{ + return fmt.Errorf("failed to remove %q: %w", disk.Dir, err) + } + return nil +} + +func forceDeleteCommand(diskName string) string { + return fmt.Sprintf("limactl disk delete --force %v", diskName) +} + +func newDiskUnlockCommand() *cobra.Command { + var diskUnlockCommand = &cobra.Command{ + Use: "unlock DISK [DISK, ...]", + Example: ` +Emergency recovery! If an instance is force stopped, it may leave a disk locked while not actually using it. + +To unlock a disk: +$ limactl disk unlock DISK + +To unlock multiple disks: +$ limactl disk unlock DISK1 DISK2 ... +`, + Short: "Unlock one or more Lima disks", + Args: cobra.MinimumNArgs(1), + RunE: diskUnlockAction, + } + return diskUnlockCommand +} + +func diskUnlockAction(cmd *cobra.Command, args []string) error { + for _, diskName := range args { + disk, err := store.InspectDisk(diskName) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + logrus.Warnf("Ignoring non-existent disk %q", diskName) + continue + } + return err + } + if disk.Instance == "" { + logrus.Warnf("Ignoring unlocked disk %q", diskName) + continue + } + // if store.Inspect throws an error, the instance does not exist, and it is safe to unlock + inst, err := store.Inspect(disk.Instance) + if err == nil { + if len(inst.Errors) > 0 { + logrus.Warnf("Cannot unlock disk %q, attached instance %q has errors: %+v", + diskName, disk.Instance, inst.Errors) + continue + } + if inst.Status == store.StatusRunning { + logrus.Warnf("Cannot unlock disk %q used by running instance %q", diskName, disk.Instance) + continue + } + } + if err := disk.Unlock(); err != nil { + return fmt.Errorf("failed to unlock disk %q: %w", diskName, err) + } + logrus.Infof("Unlocked disk %q (%q)", diskName, disk.Dir) + } + return nil +} diff --git a/cmd/limactl/main.go b/cmd/limactl/main.go index 9c1e2004101..42b5efe36b6 100644 --- a/cmd/limactl/main.go +++ b/cmd/limactl/main.go @@ -92,6 +92,7 @@ func newApp() *cobra.Command { newDebugCommand(), newEditCommand(), newFactoryResetCommand(), + newDiskCommand(), ) return rootCmd } diff --git a/cmd/limactl/stop.go b/cmd/limactl/stop.go index bf5ecd1480b..879c379c110 100644 --- a/cmd/limactl/stop.go +++ b/cmd/limactl/stop.go @@ -113,6 +113,17 @@ func stopInstanceForcibly(inst *store.Instance) { logrus.Info("The QEMU process seems already stopped") } + for _, diskName := range inst.AdditionalDisks { + disk, err := store.InspectDisk(diskName) + if err != nil { + logrus.Warnf("Disk %q does not exist", diskName) + continue + } + if err := disk.Unlock(); err != nil { + logrus.Warnf("Failed to unlock disk %q. To use, run `limactl disk unlock %v`", diskName, diskName) + } + } + if inst.HostAgentPID > 0 { logrus.Infof("Sending SIGKILL to the host agent process %d", inst.HostAgentPID) if err := osutil.SysKill(inst.HostAgentPID, osutil.SigKill); err != nil { diff --git a/docs/internal.md b/docs/internal.md index 05ef9161b9e..acc78a3fcfd 100644 --- a/docs/internal.md +++ b/docs/internal.md @@ -59,6 +59,16 @@ Host agent: - `ha.stdout.log`: hostagent stdout (JSON lines, see `pkg/hostagent/events.Event`) - `ha.stderr.log`: hostagent stderr (human-readable messages) +## Disk directory (`${LIMA_HOME}/_disk/`) + +A disk directory contains the following files: + +data disk: +- `datadisk`: the qcow2 disk that is attached to an instance + +lock: +- `in_use_by`: symlink to the instance directory that is using the disk + ## Lima cache directory (`~/Library/Caches/lima`) Currently hard-coded to `~/Library/Caches/lima` on macOS. 
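For illustration only, and not part of the patch itself: with the default `LIMA_HOME` of `~/.lima`, creating a disk and then starting an instance that attaches it is expected to produce a layout like the following (user name, disk name, and size are examples):

$ limactl disk create data --size 10G
$ limactl disk list
NAME    SIZE     DIR                                IN USE BY
data    10GiB    /Users/example/.lima/_disks/data
$ ls ~/.lima/_disks/data
datadisk
$ limactl start ./examples/disk.yaml    # see the example template added later in this patch
$ ls ~/.lima/_disks/data                # the lock symlink appears while the disk is attached
datadisk  in_use_by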
diff --git a/examples/default.yaml b/examples/default.yaml index 990280d01db..cf004e2cd13 100644 --- a/examples/default.yaml +++ b/examples/default.yaml @@ -92,6 +92,15 @@ mounts: # 🟢 Builtin default: "reverse-sshfs" mountType: null +# Lima disks to attach to the instance. The disks will be accessible from inside the +# instance, labeled by name. (e.g. if the disk is named "data", it will be labeled +# "lima-data" inside the instance). The disk will be mounted inside the instance at +# `/mnt/lima-${VOLUME}`. +# 🟢 Builtin default: null +additionalDisks: +# disks should be a list of disk name strings, for example: +# - "data" + ssh: # A localhost port of the host. Forwarded to port 22 of the guest. # 🟢 Builtin default: 0 (automatically assigned to a free port) diff --git a/examples/disk.yaml b/examples/disk.yaml new file mode 100644 index 00000000000..45c16228ef2 --- /dev/null +++ b/examples/disk.yaml @@ -0,0 +1,25 @@ +# This example requires Lima v0.14.0 or later. +images: +# Try to use release-yyyyMMdd image if available. Note that release-yyyyMMdd will be removed after several months. +- location: "https://cloud-images.ubuntu.com/releases/22.04/release-20220902/ubuntu-22.04-server-cloudimg-amd64.img" + arch: "x86_64" + digest: "sha256:c777670007cc5f132417b9e0bc01367ccfc2a989951ffa225bb1952917c3aa81" +- location: "https://cloud-images.ubuntu.com/releases/22.04/release-20220902/ubuntu-22.04-server-cloudimg-arm64.img" + arch: "aarch64" + digest: "sha256:9620f479bd5a6cbf1e805654d41b27f4fc56ef20f916c8331558241734de81ae" +# Fallback to the latest release image. +# Hint: run `limactl prune` to invalidate the cache +- location: "https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-amd64.img" + arch: "x86_64" +- location: "https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-arm64.img" + arch: "aarch64" + +mounts: +- location: "~" +- location: "/tmp/lima" + writable: true + +# in order to use this example, you must first create the disk "data". run: +# $ limactl disk create data --size 10G +additionalDisks: +- "data" diff --git a/hack/test-example.sh b/hack/test-example.sh index ecb5f0d7829..1a01172dd21 100755 --- a/hack/test-example.sh +++ b/hack/test-example.sh @@ -24,6 +24,7 @@ declare -A CHECKS=( ["restart"]="1" ["port-forwards"]="1" ["vmnet"]="" + ["disk"]="" ) case "$NAME" in @@ -45,6 +46,9 @@ case "$NAME" in "vmnet") CHECKS["vmnet"]=1 ;; +"disk") + CHECKS["disk"]=1 + ;; esac if limactl ls -q | grep -q "$NAME"; then @@ -78,6 +82,14 @@ export ftp_proxy=http://localhost:2121 INFO "Starting \"$NAME\" from \"$FILE\"" defer "limactl delete -f \"$NAME\"" + +if [[ -n ${CHECKS["disk"]} ]]; then + if ! limactl disk ls | grep -q "^data\s"; then + defer "limactl disk delete data" + limactl disk create data --size 10G + fi +fi + set -x if ! limactl start --tty=false "$FILE"; then ERROR "Failed to start \"$NAME\"" @@ -237,10 +249,24 @@ if [[ -n ${CHECKS["vmnet"]} ]]; then # NOTE: we only test the shared interface here, as the bridged interface cannot be used on GHA (and systemd-networkd-wait-online.service will fail) fi +if [[ -n ${CHECKS["disk"]} ]]; then + INFO "Testing disk is attached" + set -x + if ! 
limactl shell "$NAME" lsblk --output NAME,MOUNTPOINT | grep -q "/mnt/lima-data"; then + ERROR "Disk is not mounted" + exit 1 + fi + set +x +fi + if [[ -n ${CHECKS["restart"]} ]]; then INFO "Create file in the guest home directory and verify that it still exists after a restart" # shellcheck disable=SC2016 limactl shell "$NAME" sh -c 'touch $HOME/sweet-home' + if [[ -n ${CHECKS["disk"]} ]]; then + INFO "Create file in disk and verify that it still exists when it is reattached" + limactl shell "$NAME" sudo sh -c 'touch /mnt/lima-data/sweet-disk' + fi INFO "Stopping \"$NAME\"" limactl stop "$NAME" @@ -264,6 +290,13 @@ if [[ -n ${CHECKS["restart"]} ]]; then ERROR "Guest home directory does not persist across restarts" exit 1 fi + + if [[ -n ${CHECKS["disk"]} ]]; then + if ! limactl shell "$NAME" sh -c 'test -f /mnt/lima-data/sweet-disk'; then + ERROR "Disk does not persist across restarts" + exit 1 + fi + fi fi INFO "Stopping \"$NAME\"" diff --git a/pkg/cidata/cidata.TEMPLATE.d/boot.sh b/pkg/cidata/cidata.TEMPLATE.d/boot.sh index 1ec1ee09a03..b2fab88ed3a 100644 --- a/pkg/cidata/cidata.TEMPLATE.d/boot.sh +++ b/pkg/cidata/cidata.TEMPLATE.d/boot.sh @@ -30,7 +30,7 @@ export PATH CODE=0 -# Don't make any changes to /etc or /var/lib until boot/05-persistent-data-volume.sh +# Don't make any changes to /etc or /var/lib until boot/04-persistent-data-volume.sh # has run because it might move the directories to /mnt/data on first boot. In that # case changes made on restart would be lost. diff --git a/pkg/cidata/cidata.TEMPLATE.d/boot/05-persistent-data-volume.sh b/pkg/cidata/cidata.TEMPLATE.d/boot/04-persistent-data-volume.sh similarity index 100% rename from pkg/cidata/cidata.TEMPLATE.d/boot/05-persistent-data-volume.sh rename to pkg/cidata/cidata.TEMPLATE.d/boot/04-persistent-data-volume.sh diff --git a/pkg/cidata/cidata.TEMPLATE.d/boot/05-lima-disks.sh b/pkg/cidata/cidata.TEMPLATE.d/boot/05-lima-disks.sh new file mode 100644 index 00000000000..eef49937f85 --- /dev/null +++ b/pkg/cidata/cidata.TEMPLATE.d/boot/05-lima-disks.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +set -eux -o pipefail + +test "$LIMA_CIDATA_DISKS" -gt 0 || exit 0 + +get_disk_var() { + diskvarname="LIMA_CIDATA_DISK_${1}_${2}" + eval echo \$"$diskvarname" +} + +for i in $(seq 0 $((LIMA_CIDATA_DISKS - 1))); do + DISK_NAME="$(get_disk_var "$i" "NAME")" + DEVICE_NAME="$(get_disk_var "$i" "DEVICE")" + + # first time setup + if [[ ! -b "/dev/disk/by-label/lima-${DISK_NAME}" ]]; then + # TODO: skip if disk is tagged as "raw" + echo 'type=linux' | sfdisk --label gpt "/dev/${DEVICE_NAME}" + mkfs.ext4 -L "lima-${DISK_NAME}" "/dev/${DEVICE_NAME}1" + fi + + mkdir -p "/mnt/lima-${DISK_NAME}" + mount -t ext4 "/dev/${DEVICE_NAME}1" "/mnt/lima-${DISK_NAME}" +done diff --git a/pkg/cidata/cidata.TEMPLATE.d/boot/07-etc-environment.sh b/pkg/cidata/cidata.TEMPLATE.d/boot/07-etc-environment.sh index f5a838948f3..4cd8b07cbe5 100644 --- a/pkg/cidata/cidata.TEMPLATE.d/boot/07-etc-environment.sh +++ b/pkg/cidata/cidata.TEMPLATE.d/boot/07-etc-environment.sh @@ -1,7 +1,7 @@ #!/bin/sh set -eux -# /etc/environment must be written after 05-persistent-data-volume.sh has run to +# /etc/environment must be written after 04-persistent-data-volume.sh has run to # make sure the changes on a restart are applied to the persisted version. 
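+# (Boot scripts under boot/ run in lexical order, so 04-persistent-data-volume.sh and the new 05-lima-disks.sh have both finished by the time this script runs.)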
if [ -e /etc/environment ]; then diff --git a/pkg/cidata/cidata.TEMPLATE.d/lima.env b/pkg/cidata/cidata.TEMPLATE.d/lima.env index 59d881275b8..a688ff09b37 100644 --- a/pkg/cidata/cidata.TEMPLATE.d/lima.env +++ b/pkg/cidata/cidata.TEMPLATE.d/lima.env @@ -7,6 +7,11 @@ LIMA_CIDATA_MOUNTS={{ len .Mounts }} LIMA_CIDATA_MOUNTS_{{$i}}_MOUNTPOINT={{$val.MountPoint}} {{- end}} LIMA_CIDATA_MOUNTTYPE={{ .MountType }} +LIMA_CIDATA_DISKS={{ len .Disks }} +{{- range $i, $disk := .Disks}} +LIMA_CIDATA_DISK_{{$i}}_NAME={{$disk.Name}} +LIMA_CIDATA_DISK_{{$i}}_DEVICE={{$disk.Device}} +{{- end}} {{- if .Containerd.User}} LIMA_CIDATA_CONTAINERD_USER=1 {{- else}} diff --git a/pkg/cidata/cidata.go b/pkg/cidata/cidata.go index b1b3d41c5a3..1b6df73ef9b 100644 --- a/pkg/cidata/cidata.go +++ b/pkg/cidata/cidata.go @@ -192,6 +192,13 @@ func GenerateISO9660(instDir, name string, y *limayaml.LimaYAML, udpDNSLocalPort args.MountType = "9p" } + for i, disk := range y.AdditionalDisks { + args.Disks = append(args.Disks, Disk{ + Name: disk, + Device: diskDeviceNameFromOrder(i), + }) + } + slirpMACAddress := limayaml.MACAddress(instDir) args.Networks = append(args.Networks, Network{MACAddress: slirpMACAddress, Interface: qemu.SlirpNICName}) for _, nw := range y.Networks { @@ -330,3 +337,7 @@ func getBootCmds(p []limayaml.Provision) []BootCmds { } return bootCmds } + +func diskDeviceNameFromOrder(order int) string { + return fmt.Sprintf("vd%c", int('b')+order) +} diff --git a/pkg/cidata/template.go b/pkg/cidata/template.go index d023d576ef1..ce6de21e4ff 100644 --- a/pkg/cidata/template.go +++ b/pkg/cidata/template.go @@ -45,6 +45,10 @@ type Mount struct { type BootCmds struct { Lines []string } +type Disk struct { + Name string + Device string +} type TemplateArgs struct { Name string // instance name IID string // instance id @@ -53,6 +57,7 @@ type TemplateArgs struct { SSHPubKeys []string Mounts []Mount MountType string + Disks []Disk Containerd Containerd Networks []Network SlirpNICName string diff --git a/pkg/hostagent/hostagent.go b/pkg/hostagent/hostagent.go index 50bbb42f1bd..a780eb97eaf 100644 --- a/pkg/hostagent/hostagent.go +++ b/pkg/hostagent/hostagent.go @@ -466,6 +466,23 @@ func (a *HostAgent) startHostAgentRoutines(ctx context.Context) error { return unmountMErr }) } + if len(a.y.AdditionalDisks) > 0 { + a.onClose = append(a.onClose, func() error { + var unlockMErr error + for _, d := range a.y.AdditionalDisks { + disk, inspectErr := store.InspectDisk(d) + if inspectErr != nil { + unlockMErr = multierror.Append(unlockMErr, inspectErr) + continue + } + logrus.Infof("Unmounting disk %q", disk.Name) + if unlockErr := disk.Unlock(); unlockErr != nil { + unlockMErr = multierror.Append(unlockMErr, unlockErr) + } + } + return unlockMErr + }) + } go a.watchGuestAgentEvents(ctx) if err := a.waitForRequirements(ctx, "optional", a.optionalRequirements()); err != nil { mErr = multierror.Append(mErr, err) diff --git a/pkg/limayaml/defaults.go b/pkg/limayaml/defaults.go index 536adff019e..35290521348 100644 --- a/pkg/limayaml/defaults.go +++ b/pkg/limayaml/defaults.go @@ -161,6 +161,8 @@ func FillDefault(y, d, o *LimaYAML, filePath string) { y.Disk = pointer.String("100GiB") } + y.AdditionalDisks = append(append(o.AdditionalDisks, y.AdditionalDisks...), d.AdditionalDisks...) 
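+	// Precedence for additionalDisks: override (o) entries come first, then the instance YAML (y), then defaults (d); the lists are concatenated, not deduplicated.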
+ if y.Video.Display == nil { y.Video.Display = d.Video.Display } diff --git a/pkg/limayaml/defaults_test.go b/pkg/limayaml/defaults_test.go index 6a98fb41297..9f9281a4aa2 100644 --- a/pkg/limayaml/defaults_test.go +++ b/pkg/limayaml/defaults_test.go @@ -227,6 +227,9 @@ func TestFillDefault(t *testing.T) { CPUs: pointer.Int(7), Memory: pointer.String("5GiB"), Disk: pointer.String("105GiB"), + AdditionalDisks: []Disk{ + "data", + }, Containerd: Containerd{ System: pointer.Bool(true), User: pointer.Bool(false), @@ -336,6 +339,7 @@ func TestFillDefault(t *testing.T) { y = filledDefaults y.DNS = []net.IP{net.ParseIP("8.8.8.8")} + y.AdditionalDisks = []Disk{"overridden"} expect = y @@ -343,6 +347,7 @@ func TestFillDefault(t *testing.T) { expect.Probes = append(y.Probes, d.Probes...) expect.PortForwards = append(y.PortForwards, d.PortForwards...) expect.Containerd.Archives = append(y.Containerd.Archives, d.Containerd.Archives...) + expect.AdditionalDisks = append(y.AdditionalDisks, d.AdditionalDisks...) // Mounts and Networks start with lowest priority first, so higher priority entries can overwrite expect.Mounts = append(d.Mounts, y.Mounts...) @@ -371,6 +376,9 @@ func TestFillDefault(t *testing.T) { CPUs: pointer.Int(12), Memory: pointer.String("7GiB"), Disk: pointer.String("117GiB"), + AdditionalDisks: []Disk{ + "test", + }, Containerd: Containerd{ System: pointer.Bool(true), User: pointer.Bool(false), @@ -473,6 +481,7 @@ func TestFillDefault(t *testing.T) { expect.Probes = append(append(o.Probes, y.Probes...), d.Probes...) expect.PortForwards = append(append(o.PortForwards, y.PortForwards...), d.PortForwards...) expect.Containerd.Archives = append(append(o.Containerd.Archives, y.Containerd.Archives...), d.Containerd.Archives...) + expect.AdditionalDisks = append(append(o.AdditionalDisks, y.AdditionalDisks...), d.AdditionalDisks...) 
expect.HostResolver.Hosts["default"] = d.HostResolver.Hosts["default"] expect.HostResolver.Hosts["MY.Host"] = d.HostResolver.Hosts["host.lima.internal"] diff --git a/pkg/limayaml/limayaml.go b/pkg/limayaml/limayaml.go index bb7622fc3eb..af6d0457afd 100644 --- a/pkg/limayaml/limayaml.go +++ b/pkg/limayaml/limayaml.go @@ -13,6 +13,7 @@ type LimaYAML struct { CPUs *int `yaml:"cpus,omitempty" json:"cpus,omitempty"` Memory *string `yaml:"memory,omitempty" json:"memory,omitempty"` // go-units.RAMInBytes Disk *string `yaml:"disk,omitempty" json:"disk,omitempty"` // go-units.RAMInBytes + AdditionalDisks []Disk `yaml:"additionalDisks,omitempty" json:"additionalDisks,omitempty"` Mounts []Mount `yaml:"mounts,omitempty" json:"mounts,omitempty"` MountType *MountType `yaml:"mountType,omitempty" json:"mountType,omitempty"` SSH SSH `yaml:"ssh,omitempty" json:"ssh,omitempty"` // REQUIRED (FIXME) @@ -62,6 +63,8 @@ type Image struct { Initrd *File `yaml:"initrd,omitempty" json:"initrd,omitempty"` } +type Disk = string + type Mount struct { Location string `yaml:"location" json:"location"` // REQUIRED MountPoint string `yaml:"mountPoint,omitempty" json:"mountPoint,omitempty"` diff --git a/pkg/qemu/imgutil/imgutil.go b/pkg/qemu/imgutil/imgutil.go index 582d0e65bbe..ec39b414cfb 100644 --- a/pkg/qemu/imgutil/imgutil.go +++ b/pkg/qemu/imgutil/imgutil.go @@ -12,6 +12,7 @@ import ( // Info corresponds to the output of `qemu-img info --output=json FILE` type Info struct { Format string `json:"format,omitempty"` // since QEMU 1.3 + VSize int64 `json:"virtual-size,omitempty"` } func GetInfo(f string) (*Info, error) { diff --git a/pkg/qemu/qemu.go b/pkg/qemu/qemu.go index 9b256ed946b..2e0050c4157 100644 --- a/pkg/qemu/qemu.go +++ b/pkg/qemu/qemu.go @@ -22,6 +22,7 @@ import ( "github.com/lima-vm/lima/pkg/networks" qemu "github.com/lima-vm/lima/pkg/qemu/const" "github.com/lima-vm/lima/pkg/qemu/imgutil" + "github.com/lima-vm/lima/pkg/store" "github.com/lima-vm/lima/pkg/store/filenames" "github.com/mattn/go-shellwords" "github.com/sirupsen/logrus" @@ -128,6 +129,21 @@ func EnsureDisk(cfg Config) error { return nil } +func CreateDataDisk(dir string, size int) error { + dataDisk := filepath.Join(dir, filenames.DataDisk) + if _, err := os.Stat(dataDisk); err == nil || !errors.Is(err, fs.ErrNotExist) { + // datadisk already exists + return err + } + + args := []string{"create", "-f", "qcow2", dataDisk, strconv.Itoa(size)} + cmd := exec.Command("qemu-img", args...) 
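+	// Typically runs: qemu-img create -f qcow2 ${LIMA_HOME}/_disks/<NAME>/datadisk <SIZE>, where <SIZE> is a byte count already parsed by go-units in the caller.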
+	if out, err := cmd.CombinedOutput(); err != nil {
+		return fmt.Errorf("failed to run %v: %q: %w", cmd.Args, string(out), err)
+	}
+	return nil
+}
+
 func argValue(args []string, key string) (string, bool) {
 	if !strings.HasPrefix(key, "-") {
 		panic(fmt.Errorf("got unexpected key %q", key))
@@ -422,6 +438,30 @@ func Cmdline(cfg Config) (string, []string, error) {
 	// Disk
 	baseDisk := filepath.Join(cfg.InstanceDir, filenames.BaseDisk)
 	diffDisk := filepath.Join(cfg.InstanceDir, filenames.DiffDisk)
+	extraDisks := []string{}
+	if len(y.AdditionalDisks) > 0 {
+		for _, diskName := range y.AdditionalDisks {
+			d, err := store.InspectDisk(diskName)
+			if err != nil {
+				logrus.Errorf("could not load disk %q: %q", diskName, err)
+				return "", nil, err
+			}
+
+			if d.Instance != "" {
+				logrus.Errorf("could not attach disk %q, in use by instance %q", diskName, d.Instance)
+				return "", nil, fmt.Errorf("disk %q is already in use by instance %q", diskName, d.Instance)
+			}
+			logrus.Infof("Mounting disk %q on %q", diskName, d.MountPoint)
+			err = d.Lock(cfg.InstanceDir)
+			if err != nil {
+				logrus.Errorf("could not lock disk %q: %q", diskName, err)
+				return "", nil, err
+			}
+			dataDisk := filepath.Join(d.Dir, filenames.DataDisk)
+			extraDisks = append(extraDisks, dataDisk)
+		}
+	}
+
 	isBaseDiskCDROM, err := iso9660util.IsISO9660(baseDisk)
 	if err != nil {
 		return "", nil, err
@@ -437,6 +477,10 @@ func Cmdline(cfg Config) (string, []string, error) {
 	} else if !isBaseDiskCDROM {
 		args = append(args, "-drive", fmt.Sprintf("file=%s,if=virtio,discard=on", baseDisk))
 	}
+	for _, extraDisk := range extraDisks {
+		args = append(args, "-drive", fmt.Sprintf("file=%s,if=virtio,discard=on", extraDisk))
+	}
+
 	// cloud-init
 	switch *y.Arch {
 	case limayaml.RISCV64:
diff --git a/pkg/store/dirnames/dirnames.go b/pkg/store/dirnames/dirnames.go
index bc7f814d71c..01ea8ba73dc 100644
--- a/pkg/store/dirnames/dirnames.go
+++ b/pkg/store/dirnames/dirnames.go
@@ -53,3 +53,12 @@ func LimaNetworksDir() (string, error) {
 	}
 	return filepath.Join(limaDir, filenames.NetworksDir), nil
 }
+
+// LimaDisksDir returns the path of the disks directory, $LIMA_HOME/_disks.
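+// LimaDisksDir does not check whether the directory exists; with the default LIMA_HOME, it resolves to ~/.lima/_disks.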
+func LimaDisksDir() (string, error) { + limaDir, err := LimaDir() + if err != nil { + return "", err + } + return filepath.Join(limaDir, filenames.DisksDir), nil +} diff --git a/pkg/store/disk.go b/pkg/store/disk.go new file mode 100644 index 00000000000..f6c1cb84a03 --- /dev/null +++ b/pkg/store/disk.go @@ -0,0 +1,71 @@ +package store + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + + "github.com/lima-vm/lima/pkg/qemu/imgutil" + "github.com/lima-vm/lima/pkg/store/filenames" +) + +type Disk struct { + Name string `json:"name"` + Size int64 `json:"size"` + Dir string `json:"dir"` + Instance string `json:"instance"` + InstanceDir string `json:"instanceDir"` + MountPoint string `json:"mountPoint"` +} + +func InspectDisk(diskName string) (*Disk, error) { + disk := &Disk{ + Name: diskName, + } + + diskDir, err := DiskDir(diskName) + if err != nil { + return nil, err + } + + disk.Dir = diskDir + dataDisk := filepath.Join(diskDir, filenames.DataDisk) + if _, err := os.Stat(dataDisk); err != nil { + return nil, err + } + + info, err := imgutil.GetInfo(dataDisk) + if err != nil { + return nil, err + } + disk.Size = info.VSize + + instDir, err := os.Readlink(filepath.Join(diskDir, filenames.InUseBy)) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + disk.Instance = "" + disk.InstanceDir = "" + } else { + return nil, err + } + } else { + disk.Instance = filepath.Base(instDir) + disk.InstanceDir = instDir + } + + disk.MountPoint = fmt.Sprintf("/mnt/lima-%s", diskName) + + return disk, nil +} + +func (d *Disk) Lock(instanceDir string) error { + inUseBy := filepath.Join(d.Dir, filenames.InUseBy) + return os.Symlink(instanceDir, inUseBy) +} + +func (d *Disk) Unlock() error { + inUseBy := filepath.Join(d.Dir, filenames.InUseBy) + return os.Remove(inUseBy) +} diff --git a/pkg/store/filenames/filenames.go b/pkg/store/filenames/filenames.go index fc3be1dec2f..390a6411bd7 100644 --- a/pkg/store/filenames/filenames.go +++ b/pkg/store/filenames/filenames.go @@ -10,6 +10,7 @@ const ( ConfigDir = "_config" CacheDir = "_cache" // not yet implemented NetworksDir = "_networks" // network log files are stored here + DisksDir = "_disks" // disks are stored here ) // Filenames used inside the ConfigDir @@ -47,6 +48,13 @@ const ( SocketDir = "sock" ) +// Filenames used under a disk directory + +const ( + DataDisk = "datadisk" + InUseBy = "in_use_by" +) + // LongestSock is the longest socket name. // On macOS, the full path of the socket (excluding the NUL terminator) must be less than 104 characters. // See unix(4). 
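As a rough usage sketch of the new store API (illustrative, not part of the patch; it assumes a disk named "data" already exists):

package main

import (
	"fmt"
	"log"

	"github.com/lima-vm/lima/pkg/store"
)

func main() {
	disk, err := store.InspectDisk("data")
	if err != nil {
		log.Fatal(err) // wraps fs.ErrNotExist when the disk directory or its datadisk is missing
	}
	fmt.Println(disk.Size)       // virtual size in bytes, read from `qemu-img info`
	fmt.Println(disk.Instance)   // "" while the disk is unlocked (no in_use_by symlink)
	fmt.Println(disk.MountPoint) // "/mnt/lima-data" for a disk named "data"
}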
diff --git a/pkg/store/instance.go b/pkg/store/instance.go index 7173df8393d..5fdc7bee9d1 100644 --- a/pkg/store/instance.go +++ b/pkg/store/instance.go @@ -30,20 +30,21 @@ const ( ) type Instance struct { - Name string `json:"name"` - Status Status `json:"status"` - Dir string `json:"dir"` - Arch limayaml.Arch `json:"arch"` - CPUType string `json:"cpuType"` - CPUs int `json:"cpus,omitempty"` - Memory int64 `json:"memory,omitempty"` // bytes - Disk int64 `json:"disk,omitempty"` // bytes - Message string `json:"message,omitempty"` - Networks []limayaml.Network `json:"network,omitempty"` - SSHLocalPort int `json:"sshLocalPort,omitempty"` - HostAgentPID int `json:"hostAgentPID,omitempty"` - QemuPID int `json:"qemuPID,omitempty"` - Errors []error `json:"errors,omitempty"` + Name string `json:"name"` + Status Status `json:"status"` + Dir string `json:"dir"` + Arch limayaml.Arch `json:"arch"` + CPUType string `json:"cpuType"` + CPUs int `json:"cpus,omitempty"` + Memory int64 `json:"memory,omitempty"` // bytes + Disk int64 `json:"disk,omitempty"` // bytes + Message string `json:"message,omitempty"` + AdditionalDisks []limayaml.Disk `json:"additionalDisks,omitempty"` + Networks []limayaml.Network `json:"network,omitempty"` + SSHLocalPort int `json:"sshLocalPort,omitempty"` + HostAgentPID int `json:"hostAgentPID,omitempty"` + QemuPID int `json:"qemuPID,omitempty"` + Errors []error `json:"errors,omitempty"` } func (inst *Instance) LoadYAML() (*limayaml.LimaYAML, error) { @@ -89,6 +90,7 @@ func Inspect(instName string) (*Instance, error) { if err == nil { inst.Disk = disk } + inst.AdditionalDisks = y.AdditionalDisks inst.Networks = y.Networks inst.SSHLocalPort = *y.SSH.LocalPort // maybe 0 diff --git a/pkg/store/store.go b/pkg/store/store.go index efcbf30e0ee..d3ac2a09370 100644 --- a/pkg/store/store.go +++ b/pkg/store/store.go @@ -37,6 +37,25 @@ func Instances() ([]string, error) { return names, nil } +func Disks() ([]string, error) { + limaDiskDir, err := dirnames.LimaDisksDir() + if err != nil { + return nil, err + } + limaDiskDirList, err := os.ReadDir(limaDiskDir) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + return nil, err + } + var names []string + for _, f := range limaDiskDirList { + names = append(names, f.Name()) + } + return names, nil +} + // InstanceDir returns the instance dir. // InstanceDir does not check whether the instance exists func InstanceDir(name string) (string, error) { @@ -51,6 +70,18 @@ func InstanceDir(name string) (string, error) { return dir, nil } +func DiskDir(name string) (string, error) { + if err := identifiers.Validate(name); err != nil { + return "", err + } + limaDisksDir, err := dirnames.LimaDisksDir() + if err != nil { + return "", err + } + dir := filepath.Join(limaDisksDir, name) + return dir, nil +} + // LoadYAMLByFilePath loads and validates the yaml. func LoadYAMLByFilePath(filePath string) (*limayaml.LimaYAML, error) { // We need to use the absolute path because it may be used to determine hostSocket locations.
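Putting the pieces together, an end-to-end flow for the feature added in this patch could look roughly like this (illustrative only; "disk" is the instance name Lima derives from disk.yaml, and "data" is the example disk name):

$ limactl disk create data --size 10G
$ limactl start --tty=false ./examples/disk.yaml
$ limactl shell disk lsblk --output NAME,MOUNTPOINT    # expect a partition mounted on /mnt/lima-data
$ limactl stop disk                                    # the hostagent releases the in_use_by lock on shutdown
$ limactl disk unlock data                             # only needed after a force stop that left the lock behind
$ limactl delete disk
$ limactl disk delete data                             # succeeds once no instance references the disk; otherwise it is skipped unless --force is given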