From c97cc182ab0bec58493cd60346cc91f697368758 Mon Sep 17 00:00:00 2001 From: Luke Yue Date: Mon, 2 Sep 2024 08:39:17 -0700 Subject: [PATCH 1/3] feat(qp): introduce post send guard and implement for qp_ex We introduce a unified interface for post send operations on qp and qp_ex, with the flavor of `ibv_wr_*` APIs. We use lifetime annotations to make sure we are holding one live work request at most at the same time for one qp. Signed-off-by: Luke Yue --- examples/test_post_send.rs | 102 ++++++++++++++ src/verbs/queue_pair.rs | 279 ++++++++++++++++++++++++++++++++++++- 2 files changed, 377 insertions(+), 4 deletions(-) create mode 100644 examples/test_post_send.rs diff --git a/examples/test_post_send.rs b/examples/test_post_send.rs new file mode 100644 index 0000000..44e3fde --- /dev/null +++ b/examples/test_post_send.rs @@ -0,0 +1,102 @@ +use core::time; +use std::thread; + +use sideway::verbs::{ + address::AddressHandleAttribute, + device, + device_context::Mtu, + queue_pair::{PostSendGuard, QueuePair, QueuePairAttribute, QueuePairState, SetInlineData, WorkRequestFlags}, + AccessFlags, +}; + +fn main() -> Result<(), Box> { + let device_list = device::DeviceList::new()?; + for device in &device_list { + let ctx = device.open().unwrap(); + + let pd = ctx.alloc_pd().unwrap(); + let mr = pd.reg_managed_mr(64).unwrap(); + + let _comp_channel = ctx.create_comp_channel().unwrap(); + let mut cq_builder = ctx.create_cq_builder(); + let sq = cq_builder.setup_cqe(128).build_ex().unwrap(); + let rq = cq_builder.setup_cqe(128).build_ex().unwrap(); + + let mut builder = pd.create_qp_builder(); + + let mut qp = builder + .setup_max_inline_data(128) + .setup_send_cq(&sq) + .setup_recv_cq(&rq) + .build_ex() + .unwrap(); + + println!("qp pointer is {:?}", qp); + // modify QP to INIT state + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::Init) + .setup_pkey_index(0) + .setup_port(1) + .setup_access_flags(AccessFlags::LocalWrite | AccessFlags::RemoteWrite); + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::Init, qp.state()); + + // modify QP to RTR state, set dest qp as itself + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::ReadyToReceive) + .setup_path_mtu(Mtu::Mtu1024) + .setup_dest_qp_num(qp.qp_number()) + .setup_rq_psn(1) + .setup_max_dest_read_atomic(0) + .setup_min_rnr_timer(0); + // setup address vector + let mut ah_attr = AddressHandleAttribute::new(); + let gid_entries = ctx.query_gid_table().unwrap(); + + ah_attr + .setup_dest_lid(1) + .setup_port(1) + .setup_service_level(1) + .setup_grh_src_gid_index(gid_entries[0].gid_index().try_into().unwrap()) + .setup_grh_dest_gid(&gid_entries[0].gid()) + .setup_grh_hop_limit(64); + attr.setup_address_vector(&ah_attr); + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::ReadyToReceive, qp.state()); + + // modify QP to RTS state + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::ReadyToSend) + .setup_sq_psn(1) + .setup_timeout(12) + .setup_retry_cnt(7) + .setup_rnr_retry(7) + .setup_max_read_atomic(0); + + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::ReadyToSend, qp.state()); + + let mut guard = qp.start_post_send(); + let buf = vec![0, 1, 2, 3]; + + let write_handle = guard + .construct_wr(233, WorkRequestFlags::Signaled) + .setup_write(mr.rkey(), mr.buf.data.as_ptr() as _); + + write_handle.setup_inline_data(&buf); + + let _err = guard.post().unwrap(); + + thread::sleep(time::Duration::from_millis(5)); + + unsafe { + let slice = 
std::slice::from_raw_parts(mr.buf.data.as_ptr(), mr.buf.len); + println!("Buffer contents: {:?}", slice); + } + } + + Ok(()) +} diff --git a/src/verbs/queue_pair.rs b/src/verbs/queue_pair.rs index 9468250..67265ef 100644 --- a/src/verbs/queue_pair.rs +++ b/src/verbs/queue_pair.rs @@ -1,12 +1,14 @@ use bitmask_enum::bitmask; use lazy_static::lazy_static; use rdma_mummy_sys::{ - ibv_create_qp, ibv_create_qp_ex, ibv_destroy_qp, ibv_modify_qp, ibv_qp, ibv_qp_attr, ibv_qp_attr_mask, ibv_qp_cap, - ibv_qp_create_send_ops_flags, ibv_qp_ex, ibv_qp_init_attr, ibv_qp_init_attr_ex, ibv_qp_init_attr_mask, - ibv_qp_state, ibv_qp_to_qp_ex, ibv_qp_type, ibv_rx_hash_conf, + ibv_create_qp, ibv_create_qp_ex, ibv_data_buf, ibv_destroy_qp, ibv_modify_qp, ibv_qp, ibv_qp_attr, + ibv_qp_attr_mask, ibv_qp_cap, ibv_qp_create_send_ops_flags, ibv_qp_ex, ibv_qp_init_attr, ibv_qp_init_attr_ex, + ibv_qp_init_attr_mask, ibv_qp_state, ibv_qp_to_qp_ex, ibv_qp_type, ibv_rx_hash_conf, ibv_send_flags, ibv_wr_abort, + ibv_wr_complete, ibv_wr_opcode, ibv_wr_rdma_write, ibv_wr_send, ibv_wr_set_inline_data, + ibv_wr_set_inline_data_list, ibv_wr_start, }; use std::{ - io, + io::{self, IoSlice}, marker::PhantomData, mem::MaybeUninit, ptr::{null_mut, NonNull}, @@ -75,6 +77,58 @@ pub enum SendOperationFlags { AtomicWrite = ibv_qp_create_send_ops_flags::IBV_QP_EX_WITH_ATOMIC_WRITE.0 as _, } +#[repr(u32)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum WorkRequestOperationType { + Send = ibv_wr_opcode::IBV_WR_SEND, + SendWithImmediate = ibv_wr_opcode::IBV_WR_SEND_WITH_IMM, + Write = ibv_wr_opcode::IBV_WR_RDMA_WRITE, + WriteWithImmediate = ibv_wr_opcode::IBV_WR_RDMA_WRITE_WITH_IMM, + Read = ibv_wr_opcode::IBV_WR_RDMA_READ, + AtomicCompareAndSwap = ibv_wr_opcode::IBV_WR_ATOMIC_CMP_AND_SWP, + AtomicFetchAndAdd = ibv_wr_opcode::IBV_WR_ATOMIC_FETCH_AND_ADD, + LocalInvalidate = ibv_wr_opcode::IBV_WR_LOCAL_INV, + BindMemoryWindow = ibv_wr_opcode::IBV_WR_BIND_MW, + SendWithInvalidate = ibv_wr_opcode::IBV_WR_SEND_WITH_INV, + TcpSegmentationOffload = ibv_wr_opcode::IBV_WR_TSO, + Driver1 = ibv_wr_opcode::IBV_WR_DRIVER1, + Flush = ibv_wr_opcode::IBV_WR_FLUSH, + AtomicWrite = ibv_wr_opcode::IBV_WR_ATOMIC_WRITE, +} + +impl From for WorkRequestOperationType { + fn from(opcode: u32) -> Self { + match opcode { + ibv_wr_opcode::IBV_WR_SEND => WorkRequestOperationType::Send, + ibv_wr_opcode::IBV_WR_SEND_WITH_IMM => WorkRequestOperationType::SendWithImmediate, + ibv_wr_opcode::IBV_WR_RDMA_WRITE => WorkRequestOperationType::Write, + ibv_wr_opcode::IBV_WR_RDMA_WRITE_WITH_IMM => WorkRequestOperationType::WriteWithImmediate, + ibv_wr_opcode::IBV_WR_RDMA_READ => WorkRequestOperationType::Read, + ibv_wr_opcode::IBV_WR_ATOMIC_CMP_AND_SWP => WorkRequestOperationType::AtomicCompareAndSwap, + ibv_wr_opcode::IBV_WR_ATOMIC_FETCH_AND_ADD => WorkRequestOperationType::AtomicFetchAndAdd, + ibv_wr_opcode::IBV_WR_LOCAL_INV => WorkRequestOperationType::LocalInvalidate, + ibv_wr_opcode::IBV_WR_BIND_MW => WorkRequestOperationType::BindMemoryWindow, + ibv_wr_opcode::IBV_WR_SEND_WITH_INV => WorkRequestOperationType::SendWithInvalidate, + ibv_wr_opcode::IBV_WR_TSO => WorkRequestOperationType::TcpSegmentationOffload, + ibv_wr_opcode::IBV_WR_DRIVER1 => WorkRequestOperationType::Driver1, + ibv_wr_opcode::IBV_WR_FLUSH => WorkRequestOperationType::Flush, + ibv_wr_opcode::IBV_WR_ATOMIC_WRITE => WorkRequestOperationType::AtomicWrite, + _ => panic!("Unknown work request opcode: {opcode}"), + } + } +} + +#[bitmask(u32)] +#[bitmask_config(vec_debug)] +pub enum WorkRequestFlags { + 
Fence = ibv_send_flags::IBV_SEND_FENCE.0, + Signaled = ibv_send_flags::IBV_SEND_SIGNALED.0, + Solicited = ibv_send_flags::IBV_SEND_SOLICITED.0, + Inline = ibv_send_flags::IBV_SEND_INLINE.0, + IpChecksum = ibv_send_flags::IBV_SEND_IP_CSUM.0, +} + +#[allow(private_bounds)] pub trait QueuePair { //! return the basic handle of QP; //! we mark this method unsafe because the lifetime of ibv_qp is not @@ -103,6 +157,51 @@ pub trait QueuePair { fn state(&self) -> QueuePairState { unsafe { self.qp().as_ref().state.into() } } + + fn qp_number(&self) -> u32 { + unsafe { self.qp().as_ref().qp_num } + } + + // Every qp should hold only one PostSendGuard at the same time. + // + // RPITIT could be used here, but with lifetime bound, there could be problems. + // + // Ref: https://github.com/rust-lang/rust/issues/128752 + // https://github.com/rust-lang/rust/issues/91611 + // https://github.com/rust-lang/rfcs/pull/3425 + // https://github.com/rust-lang/rust/issues/125836 + // + type Guard<'g>: PostSendGuard + where + Self: 'g; + fn start_post_send<'qp, 'g>(&'qp mut self) -> Self::Guard<'g> + where + 'qp: 'g; +} + +mod private_traits { + use std::io::IoSlice; + // This is the private part of PostSendGuard, which is a workaround for pub trait + // not being able to have private functions. + // + // Ref: https://stackoverflow.com/questions/53204327/how-to-have-a-private-part-of-a-trait + // + pub trait PostSendGuard { + fn setup_send(&mut self); + + fn setup_write(&mut self, rkey: u32, remote_addr: u64); + + fn setup_inline_data(&mut self, buf: &[u8]); + + fn setup_inline_data_list(&mut self, bufs: &[IoSlice<'_>]); + } +} + +pub trait PostSendGuard: private_traits::PostSendGuard { + // every qp should hold only one WorkRequestHandle at the same time + fn construct_wr<'g>(&'g mut self, wr_id: u64, wr_flags: WorkRequestFlags) -> WorkRequestHandle<'g, Self>; + + fn post(self) -> Result<(), String>; } // According to C standard, enums should be int, but Rust just uses whatever @@ -293,6 +392,14 @@ impl QueuePair for BasicQueuePair<'_> { unsafe fn qp(&self) -> NonNull { self.qp } + + type Guard<'g> = BasicPostSendGuard<'g> where Self: 'g; + fn start_post_send<'qp, 'g>(&'qp mut self) -> Self::Guard<'g> + where + 'qp: 'g, + { + todo!() + } } #[derive(Debug)] @@ -313,6 +420,23 @@ impl QueuePair for ExtendedQueuePair<'_> { unsafe fn qp(&self) -> NonNull { NonNull::new_unchecked(&mut (*self.qp_ex.as_ptr()).qp_base as _) } + + type Guard<'g> = ExtendedPostSendGuard<'g> where Self: 'g; + fn start_post_send<'qp, 'g>(&'qp mut self) -> Self::Guard<'g> + where + 'qp: 'g, + { + unsafe { + ibv_wr_start(self.qp().as_ptr() as _); + } + + let guard = ExtendedPostSendGuard { + qp_ex: Some(self.qp_ex), + _phantom: PhantomData, + }; + + guard + } } pub struct QueuePairBuilder<'res> { @@ -598,3 +722,150 @@ fn attr_mask_check( Err(format!("Invalid transition from {cur_state:?} to {next_state:?}, possible invalid masks {invalid:?}, possible needed masks {needed:?}")) } } + +pub struct WorkRequestHandle<'g, G: PostSendGuard + ?Sized> { + guard: &'g mut G, +} + +pub trait SetSge {} + +pub trait SetInlineData { + fn setup_inline_data<'qp>(self, buf: &[u8]); + + fn setup_inline_data_list<'qp>(self, bufs: &[IoSlice<'_>]); +} + +pub struct SendHandle<'g, G: PostSendGuard> { + guard: &'g mut G, +} + +pub struct WriteHandle<'g, G: PostSendGuard> { + guard: &'g mut G, +} + +impl<'g, G: PostSendGuard> SetInlineData for WriteHandle<'g, G> { + fn setup_inline_data(self, buf: &[u8]) { + self.guard.setup_inline_data(buf); + } + + fn 
setup_inline_data_list(self, bufs: &[IoSlice<'_>]) { + self.guard.setup_inline_data_list(bufs); + } +} + +impl<'g, G: PostSendGuard> WorkRequestHandle<'g, G> { + pub fn setup_send(self) -> SendHandle<'g, G> { + self.guard.setup_send(); + SendHandle { guard: self.guard } + } + + pub fn setup_write(self, rkey: u32, remote_addr: u64) -> WriteHandle<'g, G> { + self.guard.setup_write(rkey, remote_addr); + WriteHandle { guard: self.guard } + } +} + +pub struct BasicPostSendGuard<'qp> { + qp: NonNull, + _phantom: PhantomData<&'qp ()>, +} + +impl PostSendGuard for BasicPostSendGuard<'_> { + fn construct_wr<'qp>(&'qp mut self, wr_id: u64, wr_flags: WorkRequestFlags) -> WorkRequestHandle<'qp, Self> { + todo!() + } + + fn post(self) -> Result<(), String> { + todo!() + } +} + +impl private_traits::PostSendGuard for BasicPostSendGuard<'_> { + fn setup_send(&mut self) { + todo!() + } + + fn setup_write(&mut self, rkey: u32, remote_addr: u64) { + todo!() + } + + fn setup_inline_data(&mut self, buf: &[u8]) { + todo!() + } + + fn setup_inline_data_list(&mut self, bufs: &[IoSlice<'_>]) { + todo!() + } +} + +pub struct ExtendedPostSendGuard<'qp> { + qp_ex: Option>, + _phantom: PhantomData<&'qp ()>, +} + +impl PostSendGuard for ExtendedPostSendGuard<'_> { + fn construct_wr<'qp>(&'qp mut self, wr_id: u64, wr_flags: WorkRequestFlags) -> WorkRequestHandle<'qp, Self> { + unsafe { + self.qp_ex.unwrap_unchecked().as_mut().wr_id = wr_id; + self.qp_ex.unwrap_unchecked().as_mut().wr_flags = wr_flags.bits; + } + WorkRequestHandle { guard: self } + } + + fn post(mut self) -> Result<(), String> { + let ret: i32 = unsafe { ibv_wr_complete(self.qp_ex.unwrap_unchecked().as_ptr()) }; + + self.qp_ex = None; + + match ret { + 0 => Ok(()), + err => Err(format!("failed to ibv_wr_complete: ret {err}")), + } + } +} + +impl private_traits::PostSendGuard for ExtendedPostSendGuard<'_> { + fn setup_send(&mut self) { + unsafe { + ibv_wr_send(self.qp_ex.unwrap_unchecked().as_ptr()); + } + } + + fn setup_write(&mut self, rkey: u32, remote_addr: u64) { + unsafe { + ibv_wr_rdma_write(self.qp_ex.unwrap_unchecked().as_ptr(), rkey, remote_addr); + } + } + + fn setup_inline_data(&mut self, buf: &[u8]) { + unsafe { ibv_wr_set_inline_data(self.qp_ex.unwrap_unchecked().as_ptr(), buf.as_ptr() as _, buf.len()) } + } + + fn setup_inline_data_list(&mut self, bufs: &[IoSlice<'_>]) { + let mut buf_list = Vec::with_capacity(bufs.len()); + + buf_list.extend(bufs.iter().map(|x| ibv_data_buf { + addr: x.as_ptr() as _, + length: x.len(), + })); + + unsafe { + ibv_wr_set_inline_data_list( + self.qp_ex.unwrap_unchecked().as_ptr(), + buf_list.len(), + buf_list.as_ptr(), + ); + } + } +} + +impl Drop for ExtendedPostSendGuard<'_> { + fn drop(&mut self) { + match self.qp_ex { + Some(qp_ex) => unsafe { + ibv_wr_abort(qp_ex.as_ptr()); + }, + None => (), + } + } +} From cddf1475e0899e27364e78c7b238c9b13ea5d2c2 Mon Sep 17 00:00:00 2001 From: Luke Yue Date: Sat, 7 Sep 2024 08:11:59 -0700 Subject: [PATCH 2/3] feat(cq): implementation poll_cq for cq_ex We implement an iterator version of poll_cq for cq_ex, this adheres to the flavor of `ibv_start_poll` and `ibv_next_poll` APIs. 
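For reference, a minimal sketch of the intended polling flow (this mirrors the
example change below; it assumes an extended CQ named `sq` that already has a
completion to report, and elides error handling):

    // start_poll wraps ibv_start_poll and returns Err if the CQ is empty
    let mut poller = sq.start_poll().unwrap();
    // the first completion is read through the handle returned by iter_mut()
    let mut wc = poller.iter_mut();
    println!("wr_id {}, status {}, opcode {}", wc.wr_id(), wc.status(), wc.opcode());
    // further completions are fetched via ibv_next_poll until the CQ is drained;
    // ibv_end_poll is issued when the poller is dropped
    while let Some(wc) = wc.next() {
        println!("wr_id {}, status {}, opcode {}", wc.wr_id(), wc.status(), wc.opcode());
    }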
Signed-off-by: Luke Yue --- examples/test_post_send.rs | 13 +++++- src/verbs/completion.rs | 93 +++++++++++++++++++++++++++++++++++++- 2 files changed, 103 insertions(+), 3 deletions(-) diff --git a/examples/test_post_send.rs b/examples/test_post_send.rs index 44e3fde..c83f863 100644 --- a/examples/test_post_send.rs +++ b/examples/test_post_send.rs @@ -90,7 +90,18 @@ fn main() -> Result<(), Box> { let _err = guard.post().unwrap(); - thread::sleep(time::Duration::from_millis(5)); + thread::sleep(time::Duration::from_millis(10)); + + // poll for the completion + { + let mut poller = sq.start_poll().unwrap(); + let mut wc = poller.iter_mut(); + println!("wr_id {}, status: {}, opcode: {}", wc.wr_id(), wc.status(), wc.opcode()); + assert_eq!(wc.wr_id(), 233); + while let Some(wc) = wc.next() { + println!("wr_id {}, status: {}, opcode: {}", wc.wr_id(), wc.status(), wc.opcode()) + } + } unsafe { let slice = std::slice::from_raw_parts(mr.buf.data.as_ptr(), mr.buf.len); diff --git a/src/verbs/completion.rs b/src/verbs/completion.rs index 6206585..769d5ee 100644 --- a/src/verbs/completion.rs +++ b/src/verbs/completion.rs @@ -1,12 +1,13 @@ -use std::marker::PhantomData; use std::os::raw::c_void; use std::ptr; use std::ptr::NonNull; +use std::{marker::PhantomData, mem::MaybeUninit}; use super::device_context::DeviceContext; use rdma_mummy_sys::{ ibv_comp_channel, ibv_cq, ibv_cq_ex, ibv_cq_init_attr_ex, ibv_create_comp_channel, ibv_create_cq, ibv_create_cq_ex, - ibv_destroy_comp_channel, ibv_destroy_cq, ibv_pd, + ibv_destroy_comp_channel, ibv_destroy_cq, ibv_end_poll, ibv_next_poll, ibv_pd, ibv_poll_cq_attr, ibv_start_poll, + ibv_wc_read_byte_len, ibv_wc_read_completion_ts, ibv_wc_read_opcode, ibv_wc_read_vendor_err, }; #[derive(Debug)] @@ -85,6 +86,25 @@ impl CompletionQueue for ExtendedCompletionQueue<'_> { } } +impl ExtendedCompletionQueue<'_> { + pub fn start_poll<'cq>(&'cq self) -> Result, String> { + let ret = unsafe { + ibv_start_poll( + self.cq_ex.as_ptr(), + MaybeUninit::::zeroed().as_mut_ptr(), + ) + }; + + match ret { + 0 => Ok(ExtendedPoller { + cq: self.cq_ex, + _phantom: PhantomData, + }), + err => Err(format!("ibv_start_poll failed, ret={err}")), + } + } +} + // generic builder for both cq and cq_ex pub struct CompletionQueueBuilder<'res> { dev_ctx: &'res DeviceContext, @@ -131,6 +151,7 @@ impl<'res> CompletionQueueBuilder<'res> { self.init_attr.comp_vector = comp_vector; self } + // TODO(fuji): set various attributes // build extended cq @@ -167,3 +188,71 @@ impl<'res> CompletionQueueBuilder<'res> { } // TODO trait for both cq and cq_ex? 
+ +pub struct ExtendedWorkCompletion<'cq> { + cq: NonNull, + _phantom: PhantomData<&'cq ()>, +} + +impl<'cq> ExtendedWorkCompletion<'cq> { + pub fn wr_id(&self) -> u64 { + unsafe { self.cq.as_ref().wr_id } + } + + pub fn status(&self) -> u32 { + unsafe { self.cq.as_ref().status } + } + + pub fn opcode(&self) -> u32 { + unsafe { ibv_wc_read_opcode(self.cq.as_ptr()) } + } + + pub fn vendor_err(&self) -> u32 { + unsafe { ibv_wc_read_vendor_err(self.cq.as_ptr()) } + } + + pub fn byte_len(&self) -> u32 { + unsafe { ibv_wc_read_byte_len(self.cq.as_ptr()) } + } + + pub fn completion_timestamp(&self) -> u64 { + unsafe { ibv_wc_read_completion_ts(self.cq.as_ptr()) } + } +} + +pub struct ExtendedPoller<'cq> { + cq: NonNull, + _phantom: PhantomData<&'cq ()>, +} + +impl ExtendedPoller<'_> { + pub fn iter_mut(&mut self) -> ExtendedWorkCompletion { + ExtendedWorkCompletion { + cq: self.cq, + _phantom: PhantomData, + } + } +} + +impl<'a> Iterator for ExtendedWorkCompletion<'a> { + type Item = ExtendedWorkCompletion<'a>; + + fn next(&mut self) -> Option { + let ret = unsafe { ibv_next_poll(self.cq.as_ptr()) }; + + if ret != 0 { + None + } else { + Some(ExtendedWorkCompletion { + cq: self.cq, + _phantom: PhantomData, + }) + } + } +} + +impl Drop for ExtendedPoller<'_> { + fn drop(&mut self) { + unsafe { ibv_end_poll(self.cq.as_ptr()) } + } +} From 93079b4c61a7bece3a604a99ddbb34ed741a98f2 Mon Sep 17 00:00:00 2001 From: Luke Yue Date: Sun, 15 Sep 2024 21:47:57 -0700 Subject: [PATCH 3/3] test(qp): add post send guard lifetime bounding check for extended qp We introduce trybuild as dev-dependency, and add some misused code which should fail to compile, so that we can check the lifetime annotation of PostSendGuard is working as expected. Signed-off-by: Luke Yue --- Cargo.toml | 3 + tests/compiletest.rs | 5 + .../one_guard_has_only_one_handle.rs | 96 +++++++++++++++++++ .../one_guard_has_only_one_handle.stderr | 11 +++ .../one_guard_has_only_one_wr.rs | 93 ++++++++++++++++++ .../one_guard_has_only_one_wr.stderr | 11 +++ .../one_qp_has_only_one_guard.rs | 91 ++++++++++++++++++ .../one_qp_has_only_one_guard.stderr | 11 +++ 8 files changed, 321 insertions(+) create mode 100644 tests/compiletest.rs create mode 100644 tests/post_send_guard/one_guard_has_only_one_handle.rs create mode 100644 tests/post_send_guard/one_guard_has_only_one_handle.stderr create mode 100644 tests/post_send_guard/one_guard_has_only_one_wr.rs create mode 100644 tests/post_send_guard/one_guard_has_only_one_wr.stderr create mode 100644 tests/post_send_guard/one_qp_has_only_one_guard.rs create mode 100644 tests/post_send_guard/one_qp_has_only_one_guard.stderr diff --git a/Cargo.toml b/Cargo.toml index 0b7cdbb..e5e4dc9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,3 +18,6 @@ libc = "0.2" os_socketaddr = "0.2" bitmask-enum = "2.2" lazy_static = "1.5.0" + +[dev-dependencies] +trybuild = "1.0" diff --git a/tests/compiletest.rs b/tests/compiletest.rs new file mode 100644 index 0000000..4b66e65 --- /dev/null +++ b/tests/compiletest.rs @@ -0,0 +1,5 @@ +#[test] +fn compile_test() { + let t = trybuild::TestCases::new(); + t.compile_fail("tests/post_send_guard/*.rs"); +} diff --git a/tests/post_send_guard/one_guard_has_only_one_handle.rs b/tests/post_send_guard/one_guard_has_only_one_handle.rs new file mode 100644 index 0000000..6dfa525 --- /dev/null +++ b/tests/post_send_guard/one_guard_has_only_one_handle.rs @@ -0,0 +1,96 @@ +use sideway::verbs::{ + address::AddressHandleAttribute, + device, + device_context::Mtu, + queue_pair::{PostSendGuard, 
QueuePair, QueuePairAttribute, QueuePairState, SetInlineData, WorkRequestFlags}, + AccessFlags, +}; + +fn main() -> Result<(), Box> { + let device_list = device::DeviceList::new()?; + for device in &device_list { + let ctx = device.open().unwrap(); + + let pd = ctx.alloc_pd().unwrap(); + let mr = pd.reg_managed_mr(64).unwrap(); + + let _comp_channel = ctx.create_comp_channel().unwrap(); + let mut cq_builder = ctx.create_cq_builder(); + let sq = cq_builder.setup_cqe(128).build_ex().unwrap(); + let rq = cq_builder.setup_cqe(128).build_ex().unwrap(); + + let mut builder = pd.create_qp_builder(); + + // block for extended qp + { + let mut qp = builder + .setup_max_inline_data(128) + .setup_send_cq(&sq) + .setup_recv_cq(&rq) + .build_ex() + .unwrap(); + + println!("qp pointer is {:?}", qp); + // modify QP to INIT state + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::Init) + .setup_pkey_index(0) + .setup_port(1) + .setup_access_flags(AccessFlags::LocalWrite | AccessFlags::RemoteWrite); + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::Init, qp.state()); + + // modify QP to RTR state, set dest qp as itself + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::ReadyToReceive) + .setup_path_mtu(Mtu::Mtu1024) + .setup_dest_qp_num(qp.qp_number()) + .setup_rq_psn(1) + .setup_max_dest_read_atomic(0) + .setup_min_rnr_timer(0); + // setup address vector + let mut ah_attr = AddressHandleAttribute::new(); + let gid_entries = ctx.query_gid_table().unwrap(); + + ah_attr + .setup_dest_lid(1) + .setup_port(1) + .setup_service_level(1) + .setup_grh_src_gid_index(gid_entries[0].gid_index().try_into().unwrap()) + .setup_grh_dest_gid(&gid_entries[0].gid()) + .setup_grh_hop_limit(64); + attr.setup_address_vector(&ah_attr); + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::ReadyToReceive, qp.state()); + + // modify QP to RTS state + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::ReadyToSend) + .setup_sq_psn(1) + .setup_timeout(12) + .setup_retry_cnt(7) + .setup_rnr_retry(7) + .setup_max_read_atomic(0); + + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::ReadyToSend, qp.state()); + + let mut guard = qp.start_post_send(); + let buf = vec![0, 1, 2, 3]; + + let write_handle = guard + .construct_wr(233, WorkRequestFlags::Signaled) + .setup_write(mr.rkey(), mr.buf.data.as_ptr() as _); + + // while holding a write handle, we can't build a send handle at the same time + let _send_handle = guard.construct_wr(2, 0.into()).setup_send(); + + write_handle.setup_inline_data(&buf); + } + } + + Ok(()) +} diff --git a/tests/post_send_guard/one_guard_has_only_one_handle.stderr b/tests/post_send_guard/one_guard_has_only_one_handle.stderr new file mode 100644 index 0000000..7a62807 --- /dev/null +++ b/tests/post_send_guard/one_guard_has_only_one_handle.stderr @@ -0,0 +1,11 @@ +error[E0499]: cannot borrow `guard` as mutable more than once at a time + --> tests/post_send_guard/one_guard_has_only_one_handle.rs:89:32 + | +84 | let write_handle = guard + | ----- first mutable borrow occurs here +... 
+89 | let _send_handle = guard.construct_wr(2, 0.into()).setup_send(); + | ^^^^^ second mutable borrow occurs here +90 | +91 | write_handle.setup_inline_data(&buf); + | ------------ first borrow later used here diff --git a/tests/post_send_guard/one_guard_has_only_one_wr.rs b/tests/post_send_guard/one_guard_has_only_one_wr.rs new file mode 100644 index 0000000..f6731ae --- /dev/null +++ b/tests/post_send_guard/one_guard_has_only_one_wr.rs @@ -0,0 +1,93 @@ +use sideway::verbs::{ + address::AddressHandleAttribute, + device, + device_context::Mtu, + queue_pair::{PostSendGuard, QueuePair, QueuePairAttribute, QueuePairState, WorkRequestFlags}, + AccessFlags, +}; + +fn main() -> Result<(), Box> { + let device_list = device::DeviceList::new()?; + for device in &device_list { + let ctx = device.open().unwrap(); + + let pd = ctx.alloc_pd().unwrap(); + let mr = pd.reg_managed_mr(64).unwrap(); + + let _comp_channel = ctx.create_comp_channel().unwrap(); + let mut cq_builder = ctx.create_cq_builder(); + let sq = cq_builder.setup_cqe(128).build_ex().unwrap(); + let rq = cq_builder.setup_cqe(128).build_ex().unwrap(); + + let mut builder = pd.create_qp_builder(); + + // block for extended qp + { + let mut qp = builder + .setup_max_inline_data(128) + .setup_send_cq(&sq) + .setup_recv_cq(&rq) + .build_ex() + .unwrap(); + + println!("qp pointer is {:?}", qp); + // modify QP to INIT state + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::Init) + .setup_pkey_index(0) + .setup_port(1) + .setup_access_flags(AccessFlags::LocalWrite | AccessFlags::RemoteWrite); + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::Init, qp.state()); + + // modify QP to RTR state, set dest qp as itself + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::ReadyToReceive) + .setup_path_mtu(Mtu::Mtu1024) + .setup_dest_qp_num(qp.qp_number()) + .setup_rq_psn(1) + .setup_max_dest_read_atomic(0) + .setup_min_rnr_timer(0); + // setup address vector + let mut ah_attr = AddressHandleAttribute::new(); + let gid_entries = ctx.query_gid_table().unwrap(); + + ah_attr + .setup_dest_lid(1) + .setup_port(1) + .setup_service_level(1) + .setup_grh_src_gid_index(gid_entries[0].gid_index().try_into().unwrap()) + .setup_grh_dest_gid(&gid_entries[0].gid()) + .setup_grh_hop_limit(64); + attr.setup_address_vector(&ah_attr); + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::ReadyToReceive, qp.state()); + + // modify QP to RTS state + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::ReadyToSend) + .setup_sq_psn(1) + .setup_timeout(12) + .setup_retry_cnt(7) + .setup_rnr_retry(7) + .setup_max_read_atomic(0); + + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::ReadyToSend, qp.state()); + + let mut guard = qp.start_post_send(); + + let wr = guard.construct_wr(233, WorkRequestFlags::Signaled); + + // while holding a write handle, we can't build a send handle at the same time + let _wr_2 = guard.construct_wr(2, 0.into()); + + let _write_handle = wr.setup_write(mr.rkey(), mr.buf.data.as_ptr() as _); + } + } + + Ok(()) +} diff --git a/tests/post_send_guard/one_guard_has_only_one_wr.stderr b/tests/post_send_guard/one_guard_has_only_one_wr.stderr new file mode 100644 index 0000000..ef1506f --- /dev/null +++ b/tests/post_send_guard/one_guard_has_only_one_wr.stderr @@ -0,0 +1,11 @@ +error[E0499]: cannot borrow `guard` as mutable more than once at a time + --> tests/post_send_guard/one_guard_has_only_one_wr.rs:86:25 + | +83 | let wr = guard.construct_wr(233, 
WorkRequestFlags::Signaled); + | ----- first mutable borrow occurs here +... +86 | let _wr_2 = guard.construct_wr(2, 0.into()); + | ^^^^^ second mutable borrow occurs here +87 | +88 | let _write_handle = wr.setup_write(mr.rkey(), mr.buf.data.as_ptr() as _); + | -- first borrow later used here diff --git a/tests/post_send_guard/one_qp_has_only_one_guard.rs b/tests/post_send_guard/one_qp_has_only_one_guard.rs new file mode 100644 index 0000000..0c66fac --- /dev/null +++ b/tests/post_send_guard/one_qp_has_only_one_guard.rs @@ -0,0 +1,91 @@ +use sideway::verbs::{ + address::AddressHandleAttribute, + device, + device_context::Mtu, + queue_pair::{PostSendGuard, QueuePair, QueuePairAttribute, QueuePairState}, + AccessFlags, +}; + +fn main() -> Result<(), Box> { + let device_list = device::DeviceList::new()?; + for device in &device_list { + let ctx = device.open().unwrap(); + + let pd = ctx.alloc_pd().unwrap(); + let _mr = pd.reg_managed_mr(64).unwrap(); + + let _comp_channel = ctx.create_comp_channel().unwrap(); + let mut cq_builder = ctx.create_cq_builder(); + let sq = cq_builder.setup_cqe(128).build_ex().unwrap(); + let rq = cq_builder.setup_cqe(128).build_ex().unwrap(); + + let mut builder = pd.create_qp_builder(); + + // block for extended qp + { + let mut qp = builder + .setup_max_inline_data(128) + .setup_send_cq(&sq) + .setup_recv_cq(&rq) + .build_ex() + .unwrap(); + + println!("qp pointer is {:?}", qp); + // modify QP to INIT state + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::Init) + .setup_pkey_index(0) + .setup_port(1) + .setup_access_flags(AccessFlags::LocalWrite | AccessFlags::RemoteWrite); + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::Init, qp.state()); + + // modify QP to RTR state, set dest qp as itself + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::ReadyToReceive) + .setup_path_mtu(Mtu::Mtu1024) + .setup_dest_qp_num(qp.qp_number()) + .setup_rq_psn(1) + .setup_max_dest_read_atomic(0) + .setup_min_rnr_timer(0); + // setup address vector + let mut ah_attr = AddressHandleAttribute::new(); + let gid_entries = ctx.query_gid_table().unwrap(); + + ah_attr + .setup_dest_lid(1) + .setup_port(1) + .setup_service_level(1) + .setup_grh_src_gid_index(gid_entries[0].gid_index().try_into().unwrap()) + .setup_grh_dest_gid(&gid_entries[0].gid()) + .setup_grh_hop_limit(64); + attr.setup_address_vector(&ah_attr); + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::ReadyToReceive, qp.state()); + + // modify QP to RTS state + let mut attr = QueuePairAttribute::new(); + attr.setup_state(QueuePairState::ReadyToSend) + .setup_sq_psn(1) + .setup_timeout(12) + .setup_retry_cnt(7) + .setup_rnr_retry(7) + .setup_max_read_atomic(0); + + qp.modify(&attr).unwrap(); + + assert_eq!(QueuePairState::ReadyToSend, qp.state()); + + let guard = qp.start_post_send(); + + // while holding a post send guard, we can't build a post send guard at the same time + let _guard_2 = qp.start_post_send(); + + let _res = guard.post().unwrap(); + } + } + + Ok(()) +} diff --git a/tests/post_send_guard/one_qp_has_only_one_guard.stderr b/tests/post_send_guard/one_qp_has_only_one_guard.stderr new file mode 100644 index 0000000..e477eb1 --- /dev/null +++ b/tests/post_send_guard/one_qp_has_only_one_guard.stderr @@ -0,0 +1,11 @@ +error[E0499]: cannot borrow `qp` as mutable more than once at a time + --> tests/post_send_guard/one_qp_has_only_one_guard.rs:84:28 + | +81 | let guard = qp.start_post_send(); + | -- first mutable borrow occurs here +... 
+84 | let _guard_2 = qp.start_post_send(); + | ^^ second mutable borrow occurs here +85 | +86 | let _res = guard.post().unwrap(); + | ----- first borrow later used here