Commit fed65ae

Move entirely to array-based SIMD
See MCP#621. This tries to make as few changes as possible -- it keeps the `new` functions taking all the parameters, for example.
1 parent: 876b08c
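For context, a minimal, hypothetical sketch of what "array-based SIMD" means here (not taken from this commit; nightly-only because of `repr_simd`): the lanes of a vector type live in a single array field rather than in one field per lane, so values are built with array syntax such as `[x; N]`.

    #![feature(repr_simd)] // nightly feature, as used throughout core_arch

    // All lanes stored in one array field instead of one field per lane.
    #[repr(simd)]
    #[derive(Copy, Clone)]
    pub struct F64x2([f64; 2]);

    fn main() {
        // `[value; N]` fills every lane with the same scalar, mirroring the
        // diff's changes such as `float64x2_t([value; 2])`.
        let _v = F64x2([1.0; 2]);
    }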

File tree

22 files changed (+593, -717 lines)
crates/core_arch/src/aarch64/neon/mod.rs (+7, -7)
@@ -23,10 +23,10 @@ use stdarch_test::assert_instr;
 types! {
     /// ARM-specific 64-bit wide vector of one packed `f64`.
     #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-    pub struct float64x1_t(f64); // FIXME: check this!
+    pub struct float64x1_t(1 x f64); // FIXME: check this!
     /// ARM-specific 128-bit wide vector of two packed `f64`.
     #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-    pub struct float64x2_t(f64, f64);
+    pub struct float64x2_t(2 x f64);
 }

 /// ARM-specific type containing two `float64x1_t` vectors.
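A note on the new spelling: `1 x f64` and `2 x f64` are not surface Rust syntax; they appear to be shorthand consumed by the `types!` macro, which (assuming the macro expands a lane count into an array field) would turn each declaration into an array-backed struct roughly like `pub struct float64x2_t([f64; 2]);`.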
@@ -1061,7 +1061,7 @@ pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t {
 #[cfg_attr(test, assert_instr(bsl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
-    let not = int64x1_t(-1);
+    let not = int64x1_t([-1; 1]);
     transmute(simd_or(
         simd_and(a, transmute(b)),
         simd_and(simd_xor(a, transmute(not)), transmute(c)),
@@ -1073,7 +1073,7 @@ pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64
 #[cfg_attr(test, assert_instr(bsl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_t {
-    let not = int64x1_t(-1);
+    let not = int64x1_t([-1; 1]);
     simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c))
 }
 /// Bitwise Select. (128-bit)
@@ -1082,7 +1082,7 @@ pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_
 #[cfg_attr(test, assert_instr(bsl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
-    let not = int64x2_t(-1, -1);
+    let not = int64x2_t([-1; 2]);
     transmute(simd_or(
         simd_and(a, transmute(b)),
         simd_and(simd_xor(a, transmute(not)), transmute(c)),
@@ -1094,7 +1094,7 @@ pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float6
 #[cfg_attr(test, assert_instr(bsl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vbslq_p64(a: poly64x2_t, b: poly64x2_t, c: poly64x2_t) -> poly64x2_t {
-    let not = int64x2_t(-1, -1);
+    let not = int64x2_t([-1; 2]);
     simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c))
 }

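The four `vbsl*` hunks above implement the documented bitwise select: per lane, the result is `(a & b) | (!a & c)`, and the all-ones constant (now written `int64x1_t([-1; 1])` / `int64x2_t([-1; 2])`) is there so that `!a` can be expressed as `a ^ -1`. A scalar sketch of the same formula, for illustration only:

    // Take bits of b where the mask a is 1, bits of c where a is 0.
    fn bsl_scalar(a: u64, b: u64, c: u64) -> u64 {
        (a & b) | ((a ^ u64::MAX) & c) // a ^ !0 is the same as !a
    }

    fn main() {
        assert_eq!(bsl_scalar(0xFF00, 0xABCD, 0x1234), 0xAB34);
    }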
@@ -1994,7 +1994,7 @@ pub unsafe fn vdupq_n_p64(value: p64) -> poly64x2_t {
 #[cfg_attr(test, assert_instr(dup))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vdupq_n_f64(value: f64) -> float64x2_t {
-    float64x2_t(value, value)
+    float64x2_t([value; 2])
 }

 /// Duplicate vector element to vector or scalar

crates/core_arch/src/arm/dsp.rs (+2, -2)
@@ -28,10 +28,10 @@ use crate::mem::transmute;
 types! {
     /// ARM-specific 32-bit wide vector of two packed `i16`.
     #[unstable(feature = "stdarch_arm_dsp", issue = "117237")]
-    pub struct int16x2_t(i16, i16);
+    pub struct int16x2_t(2 x i16);
     /// ARM-specific 32-bit wide vector of two packed `u16`.
     #[unstable(feature = "stdarch_arm_dsp", issue = "117237")]
-    pub struct uint16x2_t(u16, u16);
+    pub struct uint16x2_t(2 x u16);
 }

 extern "unadjusted" {
