Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix PATCH operation in virtio-block devices backed by asynchronous engine #4286

Merged
merged 5 commits into from
Dec 1, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 40 additions & 11 deletions src/vmm/src/devices/virtio/virtio_block/device.rs
Original file line number Diff line number Diff line change
Expand Up @@ -71,19 +71,20 @@ pub struct DiskProperties {
}

impl DiskProperties {
pub fn new(
disk_image_path: String,
is_disk_read_only: bool,
file_engine_type: FileEngineType,
) -> Result<Self, VirtioBlockError> {
let mut disk_image = OpenOptions::new()
// Helper function that opens the file with the proper access permissions
fn open_file(disk_image_path: &str, is_disk_read_only: bool) -> Result<File, VirtioBlockError> {
OpenOptions::new()
.read(true)
.write(!is_disk_read_only)
.open(PathBuf::from(&disk_image_path))
.map_err(|x| VirtioBlockError::BackingFile(x, disk_image_path.clone()))?;
.map_err(|x| VirtioBlockError::BackingFile(x, disk_image_path.to_string()))
}

// Helper function that gets the size of the file
fn file_size(disk_image_path: &str, disk_image: &mut File) -> Result<u64, VirtioBlockError> {
let disk_size = disk_image
.seek(SeekFrom::End(0))
.map_err(|x| VirtioBlockError::BackingFile(x, disk_image_path.clone()))?;
.map_err(|x| VirtioBlockError::BackingFile(x, disk_image_path.to_string()))?;

// We only support disk size, which uses the first two words of the configuration space.
// If the image is not a multiple of the sector size, the tail bits are not exposed.
Expand All @@ -95,6 +96,17 @@ impl DiskProperties {
);
}

Ok(disk_size)
}

/// Create a new file for the block device using a FileEngine
pub fn new(
disk_image_path: String,
is_disk_read_only: bool,
file_engine_type: FileEngineType,
) -> Result<Self, VirtioBlockError> {
let mut disk_image = Self::open_file(&disk_image_path, is_disk_read_only)?;
let disk_size = Self::file_size(&disk_image_path, &mut disk_image)?;
let image_id = Self::build_disk_image_id(&disk_image);

Ok(Self {
Expand All @@ -106,6 +118,25 @@ impl DiskProperties {
})
}

/// Update the path to the file backing the block device.
///
/// Opens the new image, validates/derives its size, swaps the file into the
/// underlying engine and only then commits the new properties, so that a
/// failure at any fallible step leaves `self` in its previous, consistent
/// state.
///
/// # Errors
/// Returns [`VirtioBlockError::BackingFile`] if the new image cannot be
/// opened or sized, and [`VirtioBlockError::FileEngine`] if the engine
/// cannot switch to the new file (e.g. ring re-creation fails).
pub fn update(
    &mut self,
    disk_image_path: String,
    is_disk_read_only: bool,
) -> Result<(), VirtioBlockError> {
    let mut disk_image = Self::open_file(&disk_image_path, is_disk_read_only)?;
    let disk_size = Self::file_size(&disk_image_path, &mut disk_image)?;
    // Derive the new id before `disk_image` is moved into the engine, but do
    // NOT touch `self` yet: `update_file_path` can fail, and mutating state
    // first would leave the device half-updated on error.
    let image_id = Self::build_disk_image_id(&disk_image);

    self.file_engine
        .update_file_path(disk_image)
        .map_err(VirtioBlockError::FileEngine)?;

    // All fallible steps succeeded; commit the new properties atomically
    // from the caller's perspective.
    self.image_id = image_id;
    self.nsectors = disk_size >> SECTOR_SHIFT;
    self.file_path = disk_image_path;

    Ok(())
}

fn build_device_id(disk_file: &File) -> Result<String, VirtioBlockError> {
let blk_metadata = disk_file
.metadata()
Expand Down Expand Up @@ -506,9 +537,7 @@ impl VirtioBlock {

/// Update the backing file and the config space of the block device.
pub fn update_disk_image(&mut self, disk_image_path: String) -> Result<(), VirtioBlockError> {
let disk_properties =
DiskProperties::new(disk_image_path, self.read_only, self.file_engine_type())?;
self.disk = disk_properties;
self.disk.update(disk_image_path, self.read_only)?;
self.config_space = self.disk.virtio_block_config_space();

// Kick the driver to pick up the changes.
Expand Down
32 changes: 23 additions & 9 deletions src/vmm/src/devices/virtio/virtio_block/io/async_io.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
use std::fmt::Debug;
use std::fs::File;
use std::marker::PhantomData;
use std::os::fd::RawFd;
use std::os::unix::io::AsRawFd;

use utils::eventfd::EventFd;
Expand All @@ -13,7 +14,7 @@ use crate::devices::virtio::virtio_block::io::UserDataError;
use crate::devices::virtio::virtio_block::IO_URING_NUM_ENTRIES;
use crate::io_uring::operation::{Cqe, OpCode, Operation};
use crate::io_uring::restriction::Restriction;
use crate::io_uring::{IoUring, IoUringError};
use crate::io_uring::{self, IoUring, IoUringError};
use crate::logger::log_dev_preview_warning;
use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryExtension, GuestMemoryMmap};

Expand Down Expand Up @@ -66,13 +67,10 @@ impl<T: Debug> WrappedUserData<T> {
}

impl<T: Debug> AsyncFileEngine<T> {
pub fn from_file(file: File) -> Result<AsyncFileEngine<T>, AsyncIoError> {
log_dev_preview_warning("Async file IO", Option::None);

let completion_evt = EventFd::new(libc::EFD_NONBLOCK).map_err(AsyncIoError::EventFd)?;
let ring = IoUring::new(
fn new_ring(file: &File, completion_fd: RawFd) -> Result<IoUring, io_uring::IoUringError> {
IoUring::new(
u32::from(IO_URING_NUM_ENTRIES),
vec![&file],
vec![file],
vec![
// Make sure we only allow operations on pre-registered fds.
Restriction::RequireFixedFds,
Expand All @@ -81,9 +79,16 @@ impl<T: Debug> AsyncFileEngine<T> {
Restriction::AllowOpCode(OpCode::Write),
Restriction::AllowOpCode(OpCode::Fsync),
],
Some(completion_evt.as_raw_fd()),
Some(completion_fd),
)
.map_err(AsyncIoError::IoUring)?;
}

pub fn from_file(file: File) -> Result<AsyncFileEngine<T>, AsyncIoError> {
log_dev_preview_warning("Async file IO", Option::None);

let completion_evt = EventFd::new(libc::EFD_NONBLOCK).map_err(AsyncIoError::EventFd)?;
let ring =
Self::new_ring(&file, completion_evt.as_raw_fd()).map_err(AsyncIoError::IoUring)?;

Ok(AsyncFileEngine {
file,
Expand All @@ -93,6 +98,15 @@ impl<T: Debug> AsyncFileEngine<T> {
})
}

/// Switch the engine to a new backing file, rebuilding the io_uring
/// instance for the new fd while reusing the existing completion eventfd.
pub fn update_file(&mut self, file: File) -> Result<(), AsyncIoError> {
    // Construct the replacement ring first: if this fails, the engine is
    // left untouched and still operates on the old file.
    let new_ring = Self::new_ring(&file, self.completion_evt.as_raw_fd())
        .map_err(AsyncIoError::IoUring)?;

    self.ring = new_ring;
    self.file = file;

    Ok(())
}

#[cfg(test)]
pub fn file(&self) -> &File {
&self.file
Expand Down
9 changes: 9 additions & 0 deletions src/vmm/src/devices/virtio/virtio_block/io/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,15 @@ impl<T: Debug> FileEngine<T> {
}
}

/// Swap the file backing the underlying engine.
///
/// For the async engine this rebuilds its io_uring and can fail; the sync
/// engine merely replaces its file handle and is infallible.
pub fn update_file_path(&mut self, file: File) -> Result<(), BlockIoError> {
    match self {
        FileEngine::Async(engine) => engine.update_file(file).map_err(BlockIoError::Async),
        FileEngine::Sync(engine) => {
            engine.update_file(file);
            Ok(())
        }
    }
}

#[cfg(test)]
pub fn file(&self) -> &File {
match self {
Expand Down
5 changes: 5 additions & 0 deletions src/vmm/src/devices/virtio/virtio_block/io/sync_io.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,11 @@ impl SyncFileEngine {
&self.file
}

/// Replace the backing file of the engine with the given handle.
pub fn update_file(&mut self, file: File) {
    self.file = file;
}

pub fn read(
&mut self,
offset: u64,
Expand Down
41 changes: 23 additions & 18 deletions tests/integration_tests/functional/test_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ def test_drive_io_engine(test_microvm_with_api):
assert test_microvm.api.vm_config.get().json()["drives"][0]["io_engine"] == "Sync"


def test_api_put_update_pre_boot(test_microvm_with_api):
def test_api_put_update_pre_boot(test_microvm_with_api, io_engine):
"""
Test that PUT updates are allowed before the microvm boots.

Expand All @@ -111,6 +111,7 @@ def test_api_put_update_pre_boot(test_microvm_with_api):
path_on_host=test_microvm.create_jailed_resource(fs1.path),
is_root_device=False,
is_read_only=False,
io_engine=io_engine,
)

# Updates to `kernel_image_path` with an invalid path are not allowed.
Expand All @@ -132,6 +133,7 @@ def test_api_put_update_pre_boot(test_microvm_with_api):
path_on_host="foo.bar",
is_read_only=True,
is_root_device=True,
io_engine=io_engine,
)

# Updates to `is_root_device` that result in two root block devices are not
Expand All @@ -142,6 +144,7 @@ def test_api_put_update_pre_boot(test_microvm_with_api):
path_on_host=test_microvm.get_jailed_resource(fs1.path),
is_read_only=False,
is_root_device=True,
io_engine=io_engine,
)

# Valid updates to `path_on_host` and `is_read_only` are allowed.
Expand All @@ -151,6 +154,7 @@ def test_api_put_update_pre_boot(test_microvm_with_api):
path_on_host=test_microvm.create_jailed_resource(fs2.path),
is_read_only=True,
is_root_device=False,
io_engine=io_engine,
)

# Valid updates to all fields in the machine configuration are allowed.
Expand Down Expand Up @@ -473,7 +477,7 @@ def test_api_cpu_config(test_microvm_with_api, custom_cpu_template):
test_microvm.api.cpu_config.put(**custom_cpu_template["template"])


def test_api_put_update_post_boot(test_microvm_with_api):
def test_api_put_update_post_boot(test_microvm_with_api, io_engine):
"""
Test that PUT updates are rejected after the microvm boots.
"""
Expand Down Expand Up @@ -520,6 +524,7 @@ def test_api_put_update_post_boot(test_microvm_with_api):
path_on_host=test_microvm.jailer.jailed_path(test_microvm.rootfs_file),
is_read_only=False,
is_root_device=True,
io_engine=io_engine,
)

# MMDS config is not allowed post-boot.
Expand All @@ -532,7 +537,7 @@ def test_api_put_update_post_boot(test_microvm_with_api):
test_microvm.api.mmds_config.put(**mmds_config)


def test_rate_limiters_api_config(test_microvm_with_api):
def test_rate_limiters_api_config(test_microvm_with_api, io_engine):
"""
Test the IO rate limiter API config.
"""
Expand All @@ -549,6 +554,7 @@ def test_rate_limiters_api_config(test_microvm_with_api):
is_read_only=False,
is_root_device=False,
rate_limiter={"bandwidth": {"size": 1000000, "refill_time": 100}},
io_engine=io_engine,
)

# Test drive with ops rate-limiting.
Expand All @@ -559,6 +565,7 @@ def test_rate_limiters_api_config(test_microvm_with_api):
is_read_only=False,
is_root_device=False,
rate_limiter={"ops": {"size": 1, "refill_time": 100}},
io_engine=io_engine,
)

# Test drive with bw and ops rate-limiting.
Expand All @@ -572,6 +579,7 @@ def test_rate_limiters_api_config(test_microvm_with_api):
"bandwidth": {"size": 1000000, "refill_time": 100},
"ops": {"size": 1, "refill_time": 100},
},
io_engine=io_engine,
)

# Test drive with 'empty' rate-limiting (same as not specifying the field)
Expand All @@ -582,6 +590,7 @@ def test_rate_limiters_api_config(test_microvm_with_api):
is_read_only=False,
is_root_device=False,
rate_limiter={},
io_engine=io_engine,
)

# Test the NET rate limiting API.
Expand Down Expand Up @@ -636,7 +645,7 @@ def test_rate_limiters_api_config(test_microvm_with_api):
)


def test_api_patch_pre_boot(test_microvm_with_api):
def test_api_patch_pre_boot(test_microvm_with_api, io_engine):
"""
Test that PATCH updates are not allowed before the microvm boots.
"""
Expand All @@ -654,6 +663,7 @@ def test_api_patch_pre_boot(test_microvm_with_api):
path_on_host=test_microvm.create_jailed_resource(fs1.path),
is_root_device=False,
is_read_only=False,
io_engine=io_engine,
)

iface_id = "1"
Expand Down Expand Up @@ -685,7 +695,7 @@ def test_api_patch_pre_boot(test_microvm_with_api):
test_microvm.api.network.patch(iface_id=iface_id)


def test_negative_api_patch_post_boot(test_microvm_with_api):
def test_negative_api_patch_post_boot(test_microvm_with_api, io_engine):
"""
Test PATCH updates that are not allowed after the microvm boots.
"""
Expand All @@ -702,6 +712,7 @@ def test_negative_api_patch_post_boot(test_microvm_with_api):
path_on_host=test_microvm.create_jailed_resource(fs1.path),
is_root_device=False,
is_read_only=False,
io_engine=io_engine,
)

iface_id = "1"
Expand Down Expand Up @@ -784,19 +795,13 @@ def test_send_ctrl_alt_del(test_microvm_with_api):

# If everything goes as expected, the guest OS will issue a reboot,
# causing Firecracker to exit.
# We'll keep poking Firecracker for at most 30 seconds, waiting for it
# to die.
start_time = time.time()
shutdown_ok = False
while time.time() - start_time < 30:
try:
os.kill(firecracker_pid, 0)
time.sleep(0.01)
except OSError:
shutdown_ok = True
break

assert shutdown_ok
# With WNOHANG, waitpid returns immediately instead of blocking. If the
# Firecracker process has already exited (and been reaped) by the time we
# call waitpid, it raises a ChildProcessError, which we tolerate here.
try:
os.waitpid(firecracker_pid, os.WNOHANG)
except ChildProcessError:
pass


def _drive_patch(test_microvm):
Expand Down
Loading