Test: add layout tests to check Accounts with [repr(packed)]
Due to a change in Rust 1.80.0, the fields of a struct marked with [repr(packed)] might be reordered.
It is therefore necessary to add tests confirming whether this change impacts the layout of these accounts.
More details about the Rust change at: rust-lang/rust#125360.
RainRaydium committed Oct 24, 2024
1 parent e5783e8 commit 91acc9e
Showing 8 changed files with 706 additions and 15 deletions.
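
The tests added in this commit pin down the byte layout of the zero-copy account structs. As a rough sketch of the hazard (not part of this commit; the LayoutCheck struct and its fields are hypothetical), a struct marked only with [repr(packed)] keeps the default Rust representation, where field order is unspecified, so offset-based deserialization rests on an assumption a compiler upgrade could invalidate. Asserting the size and field offsets makes that assumption explicit and testable:

// Hypothetical struct for illustration only; the real tests below cover the
// program's OperationState and ObservationState accounts.
#[repr(packed)]
#[derive(Clone, Copy)]
pub struct LayoutCheck {
    pub bump: u8,
    pub owner: [u8; 32],
}

#[cfg(test)]
mod layout_check_test {
    use super::*;

    #[test]
    fn layout_is_unchanged() {
        // Packing guarantees the total size is the sum of the field sizes.
        assert_eq!(core::mem::size_of::<LayoutCheck>(), 1 + 32);
        // Field *order*, however, is only an assumption under the default
        // (Rust) representation; these assertions fail if the compiler ever
        // reorders the fields, which is what the commit's tests check for
        // the on-chain accounts.
        assert_eq!(core::mem::offset_of!(LayoutCheck, bump), 0);
        assert_eq!(core::mem::offset_of!(LayoutCheck, owner), 1);
    }
}
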
34 changes: 26 additions & 8 deletions Cargo.lock


6 changes: 2 additions & 4 deletions Cargo.toml
@@ -1,8 +1,6 @@
[workspace]
members = [
"programs/*",
"client",
]
resolver = "2"
members = ["programs/*", "client"]

[profile.test]
opt-level = 0
6 changes: 3 additions & 3 deletions programs/amm/Cargo.toml
@@ -24,12 +24,12 @@ paramset = []

[dependencies]
anchor-lang = { version = "0.29.0", features = ["init-if-needed"] }
anchor-spl = {version = "0.29.0", features = ["metadata"]}
anchor-spl = { version = "0.29.0", features = ["metadata"] }
solana-program = "<1.17.0"
spl-memo = "4.0.0"
uint = { git = "https://github.com/raydium-io/parity-common", package = "uint" }
mpl-token-metadata = { version = "^1.11.0", features = ["no-entrypoint"] }
bytemuck = { version = "1.4.0", features = ["derive", "min_const_generics"] }
bytemuck = { version = "1.19.0", features = ["derive", "min_const_generics"] }
arrayref = { version = "0.3.6" }
solana-security-txt = "1.1.1"

@@ -46,4 +46,4 @@ overflow-checks = true
[profile.release.build-override]
opt-level = 3
incremental = false
codegen-units = 1
codegen-units = 1
51 changes: 51 additions & 0 deletions programs/amm/src/states/operation_account.rs
@@ -212,4 +212,55 @@ mod test {
        operation_state.remove_operation_owner(keys.clone());
        println!("{:?}", operation_state.operation_owners);
    }

    #[test]
    fn operation_layout_test() {
        use anchor_lang::Discriminator;

        let bump: u8 = 0x12;
        let operation_owners: [Pubkey; OPERATION_SIZE_USIZE] =
            std::array::from_fn(|_| Pubkey::new_unique());
        let whitelist_mints: [Pubkey; WHITE_MINT_SIZE_USIZE] =
            std::array::from_fn(|_| Pubkey::new_unique());

        // serialize original data
        let mut operation_data =
            [0u8; 8 + 1 + 32 * OPERATION_SIZE_USIZE + 32 * WHITE_MINT_SIZE_USIZE];
        let mut offset = 0;
        operation_data[offset..offset + 8].copy_from_slice(&OperationState::discriminator());
        offset += 8;
        operation_data[offset..offset + 1].copy_from_slice(&bump.to_le_bytes());
        offset += 1;
        for i in 0..OPERATION_SIZE_USIZE {
            operation_data[offset..offset + 32].copy_from_slice(&operation_owners[i].to_bytes());
            offset += 32;
        }
        for i in 0..WHITE_MINT_SIZE_USIZE {
            operation_data[offset..offset + 32].copy_from_slice(&whitelist_mints[i].to_bytes());
            offset += 32;
        }

        // len check
        assert_eq!(offset, operation_data.len());
        assert_eq!(
            operation_data.len(),
            core::mem::size_of::<OperationState>() + 8
        );

        // deserialize original data
        let unpack_data: &OperationState =
            bytemuck::from_bytes(&operation_data[8..core::mem::size_of::<OperationState>() + 8]);

        // data check
        let unpack_bump = unpack_data.bump;
        assert_eq!(unpack_bump, bump);
        for i in 0..OPERATION_SIZE_USIZE {
            let unpack_operation_owners = unpack_data.operation_owners[i];
            assert_eq!(unpack_operation_owners, operation_owners[i]);
        }
        for i in 0..WHITE_MINT_SIZE_USIZE {
            let unpack_whitelist_mints = unpack_data.whitelist_mints[i];
            assert_eq!(unpack_whitelist_mints, whitelist_mints[i]);
        }
    }
}
114 changes: 114 additions & 0 deletions programs/amm/src/states/oracle.rs
@@ -121,3 +121,117 @@ pub fn block_timestamp_mock() -> u64 {
        .unwrap()
        .as_secs()
}

#[cfg(test)]
pub mod oracle_layout_test {
    use super::*;
    use anchor_lang::Discriminator;
    #[test]
    fn test_observation_layout() {
        let initialized = true;
        let recent_epoch: u64 = 0x123456789abcdef0;
        let observation_index: u16 = 0x1122;
        let pool_id: Pubkey = Pubkey::new_unique();
        let padding: [u64; 4] = [
            0x123456789abcde0f,
            0x123456789abcd0ef,
            0x123456789abc0def,
            0x123456789ab0cdef,
        ];

        let mut observation_datas = [0u8; Observation::LEN * OBSERVATION_NUM];
        let mut observations = [Observation::default(); OBSERVATION_NUM];
        let mut offset = 0;
        for i in 0..OBSERVATION_NUM {
            let index = i + 1;
            let block_timestamp: u32 = u32::MAX - 3 * index as u32;
            let tick_cumulative: i64 = i64::MAX - 3 * index as i64;
            let padding: [u64; 4] = [
                u64::MAX - index as u64,
                u64::MAX - 2 * index as u64,
                u64::MAX - 3 * index as u64,
                u64::MAX - 4 * index as u64,
            ];
            observations[i].block_timestamp = block_timestamp;
            observations[i].tick_cumulative = tick_cumulative;
            observations[i].padding = padding;
            observation_datas[offset..offset + 4].copy_from_slice(&block_timestamp.to_le_bytes());
            offset += 4;
            observation_datas[offset..offset + 8].copy_from_slice(&tick_cumulative.to_le_bytes());
            offset += 8;
            observation_datas[offset..offset + 8].copy_from_slice(&padding[0].to_le_bytes());
            offset += 8;
            observation_datas[offset..offset + 8].copy_from_slice(&padding[1].to_le_bytes());
            offset += 8;
            observation_datas[offset..offset + 8].copy_from_slice(&padding[2].to_le_bytes());
            offset += 8;
            observation_datas[offset..offset + 8].copy_from_slice(&padding[3].to_le_bytes());
            offset += 8;
        }

        // serialize original data
        let mut observation_state_data = [0u8; ObservationState::LEN];
        let mut offset = 0;
        observation_state_data[offset..offset + 8]
            .copy_from_slice(&ObservationState::discriminator());
        offset += 8;
        observation_state_data[offset..offset + 1]
            .copy_from_slice(&(initialized as u8).to_le_bytes());
        offset += 1;
        observation_state_data[offset..offset + 8].copy_from_slice(&recent_epoch.to_le_bytes());
        offset += 8;
        observation_state_data[offset..offset + 2]
            .copy_from_slice(&observation_index.to_le_bytes());
        offset += 2;
        observation_state_data[offset..offset + 32].copy_from_slice(&pool_id.to_bytes());
        offset += 32;
        observation_state_data[offset..offset + Observation::LEN * OBSERVATION_NUM]
            .copy_from_slice(&observation_datas);
        offset += Observation::LEN * OBSERVATION_NUM;
        observation_state_data[offset..offset + 8].copy_from_slice(&padding[0].to_le_bytes());
        offset += 8;
        observation_state_data[offset..offset + 8].copy_from_slice(&padding[1].to_le_bytes());
        offset += 8;
        observation_state_data[offset..offset + 8].copy_from_slice(&padding[2].to_le_bytes());
        offset += 8;
        observation_state_data[offset..offset + 8].copy_from_slice(&padding[3].to_le_bytes());
        offset += 8;
        // len check
        assert_eq!(offset, observation_state_data.len());
        assert_eq!(
            observation_state_data.len(),
            core::mem::size_of::<ObservationState>() + 8
        );

        // deserialize original data
        let unpack_data: &ObservationState = bytemuck::from_bytes(
            &observation_state_data[8..core::mem::size_of::<ObservationState>() + 8],
        );

        // data check
        let unpack_initialized = unpack_data.initialized;
        assert_eq!(unpack_initialized, initialized);
        let unpack_recent_epoch = unpack_data.recent_epoch;
        assert_eq!(unpack_recent_epoch, recent_epoch);
        let unpack_observation_index = unpack_data.observation_index;
        assert_eq!(unpack_observation_index, observation_index);
        let unpack_pool_id = unpack_data.pool_id;
        assert_eq!(unpack_pool_id, pool_id);
        let unpack_padding = unpack_data.padding;
        assert_eq!(unpack_padding, padding);
        for (observation, unpack_observation) in
            observations.iter().zip(unpack_data.observations.iter())
        {
            let block_timestamp = observation.block_timestamp;
            let tick_cumulative = observation.tick_cumulative;
            let padding = observation.padding;

            let unpack_block_timestamp = unpack_observation.block_timestamp;
            let unpack_tick_cumulative = unpack_observation.tick_cumulative;
            let unpack_padding = unpack_observation.padding;
            assert_eq!(block_timestamp, unpack_block_timestamp);
            assert_eq!(tick_cumulative, unpack_tick_cumulative);
            assert_eq!(padding, unpack_padding);
        }
    }
}