Skip to content

Add disk command and allows multiple additional disks #1065

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Nov 12, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,13 @@ jobs:
retry_on: error
max_attempts: 3
command: ./hack/test-example.sh examples/experimental/9p.yaml
- name: "Test disk.yaml"
uses: nick-invision/retry@v2
with:
timeout_minutes: 30
retry_on: error
max_attempts: 3
command: ./hack/test-example.sh examples/disk.yaml
# GHA macOS is slow and flaky, so we only test a few YAMLS here.
# Other yamls are tested on Linux instances of Cirrus.

Expand Down
8 changes: 8 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,14 @@ Use `<INSTANCE>:<FILENAME>` to specify a source or target inside an instance.
#### `limactl edit`
`limactl edit <INSTANCE>`: edit the instance

#### `limactl disk`

`limactl disk create <DISK> --size <SIZE>`: create a new external disk to attach to an instance

`limactl disk delete <DISK>`: delete an existing disk

`limactl disk list`: list all existing disks

#### `limactl completion`
- To enable bash completion, add `source <(limactl completion bash)` to `~/.bash_profile`.

Expand Down
305 changes: 305 additions & 0 deletions cmd/limactl/disk.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,305 @@
package main

import (
"encoding/json"
"errors"
"fmt"
"io/fs"
"os"
"text/tabwriter"

"github.com/docker/go-units"
"github.com/lima-vm/lima/pkg/qemu"
"github.com/lima-vm/lima/pkg/store"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

// newDiskCommand returns the parent `limactl disk` command with all of its
// subcommands (create, list, delete, unlock) attached.
func newDiskCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "disk",
		Short: "Lima disk management",
		Example: `  Create a disk:
  $ limactl disk create DISK --size SIZE

  List existing disks:
  $ limactl disk ls

  Delete a disk:
  $ limactl disk delete DISK`,
		SilenceUsage:  true,
		SilenceErrors: true,
	}
	// Register each disk subcommand.
	cmd.AddCommand(newDiskCreateCommand())
	cmd.AddCommand(newDiskListCommand())
	cmd.AddCommand(newDiskDeleteCommand())
	cmd.AddCommand(newDiskUnlockCommand())
	return cmd
}

// newDiskCreateCommand returns the `limactl disk create` subcommand.
// The --size flag is mandatory; its value is parsed by diskCreateAction.
func newDiskCreateCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use: "create DISK",
		Example: `
To create a new disk:
$ limactl disk create DISK --size SIZE
`,
		Short: "Create a Lima disk",
		Args:  cobra.ExactArgs(1),
		RunE:  diskCreateAction,
	}
	cmd.Flags().String("size", "", "configure the disk size")
	// MarkFlagRequired only fails for an unknown flag name, which cannot
	// happen here since "size" was registered on the line above.
	cmd.MarkFlagRequired("size")
	return cmd
}

func diskCreateAction(cmd *cobra.Command, args []string) error {
size, err := cmd.Flags().GetString("size")
if err != nil {
return err
}

diskSize, err := units.RAMInBytes(size)
if err != nil {
return err
}

// only exactly one arg is allowed
name := args[0]

diskDir, err := store.DiskDir(name)
if err != nil {
return err
}

if _, err := os.Stat(diskDir); !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("disk %q already exists (%q)", name, diskDir)
}

logrus.Infof("Creating a disk %q", name)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe include the disk size in the log, so show how the size was parsed:

Suggested change
logrus.Infof("Creating a disk %q", name)
logrus.Infof("Creating disk %q size %s", name, units.BytesSize(float64(diskSize)))


if err := os.MkdirAll(diskDir, 0700); err != nil {
return err
}

if err := qemu.CreateDataDisk(diskDir, int(diskSize)); err != nil {
return err
}

return nil
}

func newDiskListCommand() *cobra.Command {
var diskListCommand = &cobra.Command{
Use: "list",
Example: `
To list existing disks:
$ limactl disk list
`,
Short: "List existing Lima disks",
Aliases: []string{"ls"},
Args: cobra.NoArgs,
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thought for a later PR (low priority):

I think it would be good to allow disk names to be passed as args (for symmetry with limactl ls, which allows instance names), and then only emit the data for those disk images.

RunE: diskListAction,
}
diskListCommand.Flags().Bool("json", false, "JSONify output")
return diskListCommand
}

// diskListAction prints all known disks, either as a tab-aligned table or,
// with --json, as one JSON document per disk. Disks that fail inspection are
// logged and skipped rather than aborting the listing.
func diskListAction(cmd *cobra.Command, args []string) error {
	jsonFormat, err := cmd.Flags().GetBool("json")
	if err != nil {
		return err
	}

	allDisks, err := store.Disks()
	if err != nil {
		return err
	}

	out := cmd.OutOrStdout()

	if jsonFormat {
		// JSON mode: one marshaled disk per line, no header.
		for _, name := range allDisks {
			d, err := store.InspectDisk(name)
			if err != nil {
				logrus.WithError(err).Errorf("disk %q does not exist?", name)
				continue
			}
			data, err := json.Marshal(d)
			if err != nil {
				return err
			}
			fmt.Fprintln(out, string(data))
		}
		return nil
	}

	// Table mode.
	tw := tabwriter.NewWriter(out, 4, 8, 4, ' ', 0)
	fmt.Fprintln(tw, "NAME\tSIZE\tDIR\tIN USE BY")

	if len(allDisks) == 0 {
		logrus.Warn("No disk found. Run `limactl disk create DISK --size SIZE` to create a disk.")
	}

	for _, name := range allDisks {
		d, err := store.InspectDisk(name)
		if err != nil {
			logrus.WithError(err).Errorf("disk %q does not exist?", name)
			continue
		}
		fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", d.Name, units.BytesSize(float64(d.Size)), d.Dir, d.Instance)
	}

	return tw.Flush()
}

func newDiskDeleteCommand() *cobra.Command {
var diskDeleteCommand = &cobra.Command{
Use: "delete DISK [DISK, ...]",
Example: `
To delete a disk:
$ limactl disk delete DISK

To delete multiple disks:
$ limactl disk delete DISK1 DISK2 ...
`,
Aliases: []string{"remove", "rm"},
Short: "Delete one or more Lima disks",
Args: cobra.MinimumNArgs(1),
RunE: diskDeleteAction,
}
diskDeleteCommand.Flags().Bool("force", false, "force delete")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The option should allow -f shorthand, just like limactl delete -f INSTANCE:

Suggested change
diskDeleteCommand.Flags().Bool("force", false, "force delete")
diskDeleteCommand.Flags().BoolP("force", "f", false, "force delete")

return diskDeleteCommand
}

func diskDeleteAction(cmd *cobra.Command, args []string) error {
force, err := cmd.Flags().GetBool("force")
if err != nil {
return err
}

for _, diskName := range args {
if force {
disk, err := store.InspectDisk(diskName)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
logrus.Warnf("Ignoring non-existent disk %q", diskName)
continue
}
return err
}

if err := deleteDisk(disk); err != nil {
return fmt.Errorf("failed to delete disk %q: %w", diskName, err)
}
logrus.Infof("Deleted %q (%q)", diskName, disk.Dir)
continue
}

disk, err := store.InspectDisk(diskName)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
logrus.Warnf("Ignoring non-existent disk %q", diskName)
continue
}
return err
}
if disk.Instance != "" {
return fmt.Errorf("cannot delete disk %q in use by instance %q", disk.Name, disk.Instance)
Copy link
Member

@jandubois jandubois Nov 12, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It feels inconsistent to me that a locked disk will abort the command, but a non-existing disk is simply skipped:

limactl disk rm foo bar

If foo doesn't exist, bar will still be deleted. But if foo exists and is locked, then bar will not be deleted (and there is no message to the effect that additional disks have been skipped due to the error).

I think a locked disk should be logged as an error, but additional disks should still be processed.

And then I wonder if failures of deleteDisk() should be treated the same way too: log an error, but continue to attempt deleting the remaining disks?

Of course the command must exit with a non-zero status code if any disk couldn't be deleted. I'm not sure how non-existing disks should be treated: should they also cause a non-zero error?

@AkihiroSuda WDYT?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you open an issue for this? I think we should keep this behavior consistent between limactl disk delete and limactl delete, and I think this needs some more discussion.

}
instances, err := store.Instances()
if err != nil {
return err
}
Comment on lines +210 to +213
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This should be moved outside the for loop:

		instances, err := store.Instances()
		if err != nil {
			return err
		}
		for _, diskName := range args {
			...
		}

Although the "expensive" operation is store.Inspect(instName), so those would have to be cached outside the loop as well. Maybe not worth the extra code.

var refInstances []string
for _, instName := range instances {
inst, err := store.Inspect(instName)
if err != nil {
continue
}
if len(inst.AdditionalDisks) > 0 {
for _, d := range inst.AdditionalDisks {
if d == diskName {
refInstances = append(refInstances, instName)
}
}
}
}
if len(refInstances) > 0 {
logrus.Warnf("Skipping deleting disk %q, disk is referenced by one or more non-running instances: %q",
diskName, refInstances)
logrus.Warnf("To delete anyway, run %q", forceDeleteCommand(diskName))
continue
}
if err := deleteDisk(disk); err != nil {
return fmt.Errorf("failed to delete disk %q: %v", diskName, err)
}
logrus.Infof("Deleted %q (%q)", diskName, disk.Dir)
}
return nil
}

// deleteDisk removes the disk's directory and everything inside it.
func deleteDisk(disk *store.Disk) error {
	err := os.RemoveAll(disk.Dir)
	if err != nil {
		return fmt.Errorf("failed to remove %q: %w", disk.Dir, err)
	}
	return nil
}

// forceDeleteCommand returns the shell command line a user can run to
// force-delete the named disk.
func forceDeleteCommand(diskName string) string {
	return "limactl disk delete --force " + diskName
}

// newDiskUnlockCommand returns the `limactl disk unlock` subcommand, used to
// recover disks left locked by a force-stopped instance.
func newDiskUnlockCommand() *cobra.Command {
	return &cobra.Command{
		Use: "unlock DISK [DISK, ...]",
		Example: `
Emergency recovery! If an instance is force stopped, it may leave a disk locked while not actually using it.

To unlock a disk:
$ limactl disk unlock DISK

To unlock multiple disks:
$ limactl disk unlock DISK1 DISK2 ...
`,
		Short: "Unlock one or more Lima disks",
		Args:  cobra.MinimumNArgs(1),
		RunE:  diskUnlockAction,
	}
}

// diskUnlockAction removes stale instance locks from the named disks.
// A disk is only unlocked when its locking instance either no longer exists
// or exists but is not running and has no errors.
func diskUnlockAction(cmd *cobra.Command, args []string) error {
	for _, name := range args {
		d, err := store.InspectDisk(name)
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				logrus.Warnf("Ignoring non-existent disk %q", name)
				continue
			}
			return err
		}
		if d.Instance == "" {
			// No instance holds this disk; nothing to unlock.
			logrus.Warnf("Ignoring unlocked disk %q", name)
			continue
		}
		// If store.Inspect returns an error, the locking instance does not
		// exist any more, and it is safe to unlock.
		if inst, err := store.Inspect(d.Instance); err == nil {
			if len(inst.Errors) > 0 {
				logrus.Warnf("Cannot unlock disk %q, attached instance %q has errors: %+v",
					name, d.Instance, inst.Errors)
				continue
			}
			if inst.Status == store.StatusRunning {
				logrus.Warnf("Cannot unlock disk %q used by running instance %q", name, d.Instance)
				continue
			}
		}
		if err := d.Unlock(); err != nil {
			return fmt.Errorf("failed to unlock disk %q: %w", name, err)
		}
		logrus.Infof("Unlocked disk %q (%q)", name, d.Dir)
	}
	return nil
}
1 change: 1 addition & 0 deletions cmd/limactl/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,7 @@ func newApp() *cobra.Command {
newDebugCommand(),
newEditCommand(),
newFactoryResetCommand(),
newDiskCommand(),
)
return rootCmd
}
Expand Down
11 changes: 11 additions & 0 deletions cmd/limactl/stop.go
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,17 @@ func stopInstanceForcibly(inst *store.Instance) {
logrus.Info("The QEMU process seems already stopped")
}

// Unlock every additional disk attached to this instance; a force-stopped
// QEMU process cannot release the disk locks itself. Failures are only
// warned about — the user can recover with `limactl disk unlock`.
for _, diskName := range inst.AdditionalDisks {
disk, err := store.InspectDisk(diskName)
if err != nil {
logrus.Warnf("Disk %q does not exist", diskName)
continue
}
if err := disk.Unlock(); err != nil {
logrus.Warnf("Failed to unlock disk %q. To use, run `limactl disk unlock %v`", diskName, diskName)
}
}

if inst.HostAgentPID > 0 {
logrus.Infof("Sending SIGKILL to the host agent process %d", inst.HostAgentPID)
if err := osutil.SysKill(inst.HostAgentPID, osutil.SigKill); err != nil {
Expand Down
10 changes: 10 additions & 0 deletions docs/internal.md
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,16 @@ Host agent:
- `ha.stdout.log`: hostagent stdout (JSON lines, see `pkg/hostagent/events.Event`)
- `ha.stderr.log`: hostagent stderr (human-readable messages)

## Disk directory (`${LIMA_HOME}/_disk/<DISK>`)

A disk directory contains the following files:

data disk:
- `datadisk`: the qcow2 disk that is attached to an instance

lock:
- `in_use_by`: symlink to the instance directory that is using the disk

## Lima cache directory (`~/Library/Caches/lima`)

Currently hard-coded to `~/Library/Caches/lima` on macOS.
Expand Down
9 changes: 9 additions & 0 deletions examples/default.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,15 @@ mounts:
# 🟢 Builtin default: "reverse-sshfs"
mountType: null

# Lima disks to attach to the instance. The disks will be accessible from inside the
# instance, labeled by name. (e.g. if the disk is named "data", it will be labeled
# "lima-data" inside the instance). The disk will be mounted inside the instance at
# `/mnt/lima-${VOLUME}`.
# 🟢 Builtin default: null
additionalDisks:
# disks should be a list of disk name strings, for example:
# - "data"

ssh:
# A localhost port of the host. Forwarded to port 22 of the guest.
# 🟢 Builtin default: 0 (automatically assigned to a free port)
Expand Down
Loading